-rwxr-xr-xTI_OMAP4_Kernel_Release_Notes_L24.x.txt480
-rw-r--r--arch/arm/Kconfig19
-rw-r--r--arch/arm/configs/omap4_pm_defconfig4
-rw-r--r--arch/arm/configs/omap_4430sdp_defconfig515
-rw-r--r--arch/arm/configs/omap_4430simulator_defconfig1017
-rw-r--r--arch/arm/include/asm/cacheflush.h27
-rwxr-xr-x[-rw-r--r--]arch/arm/include/asm/io.h3
-rw-r--r--arch/arm/include/asm/tlbflush.h29
-rw-r--r--arch/arm/include/asm/unistd.h10
-rw-r--r--arch/arm/kernel/setup.c1
-rw-r--r--arch/arm/kernel/smp.c4
-rw-r--r--arch/arm/kernel/traps.c18
-rw-r--r--arch/arm/mach-omap2/Kconfig26
-rw-r--r--arch/arm/mach-omap2/Makefile6
-rwxr-xr-xarch/arm/mach-omap2/board-4430sdp-wifi.c138
-rwxr-xr-x[-rw-r--r--]arch/arm/mach-omap2/board-4430sdp.c365
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c17
-rw-r--r--arch/arm/mach-omap2/dmtimers.c12
-rw-r--r--arch/arm/mach-omap2/gpio.c25
-rw-r--r--arch/arm/mach-omap2/include/mach/dmm.h128
-rw-r--r--arch/arm/mach-omap2/include/mach/tiler.h116
-rw-r--r--arch/arm/mach-omap2/iommu2.c7
-rwxr-xr-x[-rw-r--r--]arch/arm/mach-omap2/mailbox.c76
-rw-r--r--arch/arm/mach-omap2/mcbsp.c838
-rw-r--r--arch/arm/mach-omap2/mmc-twl4030.c173
-rw-r--r--arch/arm/mach-omap2/mmc-twl4030.h6
-rw-r--r--arch/arm/mach-omap2/omap-hotplug.c60
-rw-r--r--arch/arm/mach-omap2/omap4-iommu.c110
-rw-r--r--arch/arm/mach-omap2/omap44xx-smc.S2
-rw-r--r--arch/arm/mach-omap2/pm.h2
-rw-r--r--arch/arm/mach-omap2/pm44xx.c24
-rw-r--r--arch/arm/mach-omap2/serial.c68
-rw-r--r--arch/arm/mach-omap2/sleep44xx.S48
-rw-r--r--arch/arm/mm/cache-fa.S6
-rw-r--r--arch/arm/mm/cache-v3.S29
-rw-r--r--arch/arm/mm/cache-v4.S29
-rw-r--r--arch/arm/mm/cache-v4wb.S6
-rw-r--r--arch/arm/mm/cache-v4wt.S15
-rw-r--r--arch/arm/mm/cache-v6.S6
-rw-r--r--arch/arm/mm/cache-v7.S10
-rw-r--r--arch/arm/mm/dma-mapping.c83
-rwxr-xr-x[-rw-r--r--]arch/arm/mm/ioremap.c103
-rw-r--r--arch/arm/mm/mmu.c4
-rw-r--r--arch/arm/mm/proc-arm1020.S6
-rw-r--r--arch/arm/mm/proc-arm1020e.S6
-rw-r--r--arch/arm/mm/proc-arm1022.S6
-rw-r--r--arch/arm/mm/proc-arm1026.S6
-rw-r--r--arch/arm/mm/proc-arm920.S6
-rw-r--r--arch/arm/mm/proc-arm922.S6
-rw-r--r--arch/arm/mm/proc-arm925.S6
-rw-r--r--arch/arm/mm/proc-arm926.S6
-rw-r--r--arch/arm/mm/proc-arm940.S6
-rw-r--r--arch/arm/mm/proc-arm946.S6
-rw-r--r--arch/arm/mm/proc-feroceon.S12
-rw-r--r--arch/arm/mm/proc-mohawk.S6
-rw-r--r--arch/arm/mm/proc-xsc3.S6
-rw-r--r--arch/arm/mm/proc-xscale.S8
-rw-r--r--arch/arm/mm/tlb-v7.S8
-rw-r--r--arch/arm/plat-omap/Makefile2
-rw-r--r--arch/arm/plat-omap/devices.c58
-rwxr-xr-xarch/arm/plat-omap/hdmi_lib.c1299
-rwxr-xr-x[-rw-r--r--]arch/arm/plat-omap/include/plat/display.h124
-rwxr-xr-xarch/arm/plat-omap/include/plat/hdmi_lib.h428
-rw-r--r--arch/arm/plat-omap/include/plat/irqs.h2
-rw-r--r--arch/arm/plat-omap/include/plat/mcbsp.h185
-rw-r--r--arch/arm/plat-omap/include/plat/mmc.h18
-rw-r--r--arch/arm/plat-omap/include/plat/omap-serial.h128
-rwxr-xr-xarch/arm/plat-omap/include/plat/wifi_tiwlan.h23
-rw-r--r--arch/arm/plat-omap/include/syslink/GlobalTypes.h154
-rw-r--r--arch/arm/plat-omap/include/syslink/MBXAccInt.h550
-rw-r--r--arch/arm/plat-omap/include/syslink/MBXRegAcM.h3027
-rw-r--r--arch/arm/plat-omap/include/syslink/MLBAccInt.h132
-rw-r--r--arch/arm/plat-omap/include/syslink/MLBRegAcM.h206
-rw-r--r--arch/arm/plat-omap/include/syslink/MMUAccInt.h180
-rw-r--r--arch/arm/plat-omap/include/syslink/MMURegAcM.h434
-rw-r--r--arch/arm/plat-omap/include/syslink/_sysmgr.h50
-rw-r--r--arch/arm/plat-omap/include/syslink/atomic_linux.h105
-rw-r--r--arch/arm/plat-omap/include/syslink/ducatienabler.h291
-rw-r--r--arch/arm/plat-omap/include/syslink/gate_remote.h34
-rw-r--r--arch/arm/plat-omap/include/syslink/gatepeterson.h167
-rw-r--r--arch/arm/plat-omap/include/syslink/gatepeterson_ioctl.h193
-rw-r--r--arch/arm/plat-omap/include/syslink/gt.h320
-rw-r--r--arch/arm/plat-omap/include/syslink/heap.h91
-rw-r--r--arch/arm/plat-omap/include/syslink/heapbuf.h152
-rw-r--r--arch/arm/plat-omap/include/syslink/heapbuf_ioctl.h215
-rw-r--r--arch/arm/plat-omap/include/syslink/host_os.h72
-rw-r--r--arch/arm/plat-omap/include/syslink/hw_defs.h63
-rw-r--r--arch/arm/plat-omap/include/syslink/hw_mbox.h447
-rw-r--r--arch/arm/plat-omap/include/syslink/hw_mmu.h171
-rw-r--r--arch/arm/plat-omap/include/syslink/hw_ocp.h60
-rw-r--r--arch/arm/plat-omap/include/syslink/ipc_ioctl.h92
-rw-r--r--arch/arm/plat-omap/include/syslink/listmp.h267
-rw-r--r--arch/arm/plat-omap/include/syslink/listmp_sharedmemory.h289
-rw-r--r--arch/arm/plat-omap/include/syslink/listmp_sharedmemory_ioctl.h258
-rw-r--r--arch/arm/plat-omap/include/syslink/messageq.h464
-rw-r--r--arch/arm/plat-omap/include/syslink/messageq_ioctl.h237
-rw-r--r--arch/arm/plat-omap/include/syslink/messageq_transportshm.h283
-rw-r--r--arch/arm/plat-omap/include/syslink/messageq_transportshm_ioctl.h160
-rw-r--r--arch/arm/plat-omap/include/syslink/multiproc.h83
-rw-r--r--arch/arm/plat-omap/include/syslink/multiproc_ioctl.h94
-rw-r--r--arch/arm/plat-omap/include/syslink/nameserver.h131
-rw-r--r--arch/arm/plat-omap/include/syslink/nameserver_ioctl.h230
-rw-r--r--arch/arm/plat-omap/include/syslink/nameserver_remote.h39
-rw-r--r--arch/arm/plat-omap/include/syslink/nameserver_remotenotify.h100
-rw-r--r--arch/arm/plat-omap/include/syslink/nameserver_remotenotify_ioctl.h163
-rw-r--r--arch/arm/plat-omap/include/syslink/notify.h267
-rw-r--r--arch/arm/plat-omap/include/syslink/notify_dispatcher.h158
-rw-r--r--arch/arm/plat-omap/include/syslink/notify_driver.h44
-rw-r--r--arch/arm/plat-omap/include/syslink/notify_driverdefs.h440
-rw-r--r--arch/arm/plat-omap/include/syslink/notify_ducatidriver.h200
-rw-r--r--arch/arm/plat-omap/include/syslink/notify_ducatidriver_defs.h152
-rw-r--r--arch/arm/plat-omap/include/syslink/notify_shmdriver.h108
-rw-r--r--arch/arm/plat-omap/include/syslink/notifydefs.h25
-rw-r--r--arch/arm/plat-omap/include/syslink/notifyerr.h198
-rw-r--r--arch/arm/plat-omap/include/syslink/platform.h45
-rw-r--r--arch/arm/plat-omap/include/syslink/platform_mem.h137
-rw-r--r--arch/arm/plat-omap/include/syslink/procmgr.h280
-rw-r--r--arch/arm/plat-omap/include/syslink/sharedregion.h110
-rw-r--r--arch/arm/plat-omap/include/syslink/sharedregion_ioctl.h181
-rw-r--r--arch/arm/plat-omap/include/syslink/sysmemmgr.h179
-rw-r--r--arch/arm/plat-omap/include/syslink/sysmemmgr_ioctl.h130
-rw-r--r--arch/arm/plat-omap/include/syslink/sysmgr.h182
-rw-r--r--arch/arm/plat-omap/include/syslink/sysmgr_ioctl.h100
-rw-r--r--arch/arm/plat-omap/iommu.c26
-rw-r--r--arch/arm/plat-omap/mailbox.c22
-rw-r--r--arch/arm/plat-omap/mcbsp.c2
-rw-r--r--arch/arm/vfp/vfpmodule.c2
-rw-r--r--drivers/Kconfig7
-rw-r--r--drivers/Makefile7
-rwxr-xr-xdrivers/dsp/syslink/Kconfig62
-rw-r--r--drivers/dsp/syslink/multicore_ipc/Kbuild25
-rw-r--r--drivers/dsp/syslink/multicore_ipc/_listmp.h48
-rw-r--r--drivers/dsp/syslink/multicore_ipc/gate_remote.c40
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/gatepeterson.c964
-rw-r--r--drivers/dsp/syslink/multicore_ipc/gatepeterson_ioctl.c392
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/heap.c101
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/heapbuf.c1173
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/heapbuf_ioctl.c485
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/ipc_drv.c242
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/ipc_ioctl.c80
-rw-r--r--drivers/dsp/syslink/multicore_ipc/listmp.c440
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/listmp_sharedmemory.c1487
-rw-r--r--drivers/dsp/syslink/multicore_ipc/listmp_sharedmemory_ioctl.c701
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/messageq.c1597
-rw-r--r--drivers/dsp/syslink/multicore_ipc/messageq_ioctl.c490
-rw-r--r--drivers/dsp/syslink/multicore_ipc/messageq_transportshm.c778
-rw-r--r--drivers/dsp/syslink/multicore_ipc/messageq_transportshm_ioctl.c334
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/multiproc.c242
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/multiproc_ioctl.c171
-rw-r--r--drivers/dsp/syslink/multicore_ipc/nameserver.c1005
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/nameserver_ioctl.c597
-rw-r--r--drivers/dsp/syslink/multicore_ipc/nameserver_remote.c49
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/nameserver_remotenotify.c721
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/nameserver_remotenotify_ioctl.c346
-rw-r--r--drivers/dsp/syslink/multicore_ipc/platform.c1420
-rw-r--r--drivers/dsp/syslink/multicore_ipc/platform_mem.c288
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/platformcfg.c91
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/sharedregion.c800
-rwxr-xr-xdrivers/dsp/syslink/multicore_ipc/sharedregion_ioctl.c354
-rw-r--r--drivers/dsp/syslink/multicore_ipc/sysmemmgr.c459
-rw-r--r--drivers/dsp/syslink/multicore_ipc/sysmemmgr_ioctl.c227
-rw-r--r--drivers/dsp/syslink/multicore_ipc/sysmgr.c846
-rw-r--r--drivers/dsp/syslink/multicore_ipc/sysmgr_ioctl.c147
-rw-r--r--drivers/dsp/syslink/notify_ducatidriver/Kbuild19
-rw-r--r--drivers/dsp/syslink/notify_ducatidriver/drv_ducati.c348
-rwxr-xr-xdrivers/dsp/syslink/notify_ducatidriver/notify_ducati.c1262
-rwxr-xr-xdrivers/dsp/syslink/omap_notify/Kbuild19
-rwxr-xr-xdrivers/dsp/syslink/omap_notify/drv_notify.c918
-rwxr-xr-xdrivers/dsp/syslink/omap_notify/notify.c548
-rwxr-xr-xdrivers/dsp/syslink/omap_notify/notify_driver.c172
-rwxr-xr-xdrivers/dsp/syslink/procmgr/Kbuild10
-rwxr-xr-xdrivers/dsp/syslink/procmgr/proc4430/Kbuild10
-rwxr-xr-xdrivers/dsp/syslink/procmgr/proc4430/dmm4430.c355
-rwxr-xr-xdrivers/dsp/syslink/procmgr/proc4430/dmm4430.h50
-rw-r--r--drivers/dsp/syslink/procmgr/proc4430/ducatienabler.c869
-rwxr-xr-xdrivers/dsp/syslink/procmgr/proc4430/hw_mmu.c661
-rw-r--r--drivers/dsp/syslink/procmgr/proc4430/proc4430.c1053
-rwxr-xr-xdrivers/dsp/syslink/procmgr/proc4430/proc4430.h147
-rwxr-xr-xdrivers/dsp/syslink/procmgr/proc4430/proc4430_drv.c400
-rwxr-xr-xdrivers/dsp/syslink/procmgr/proc4430/proc4430_drvdefs.h169
-rwxr-xr-xdrivers/dsp/syslink/procmgr/procdefs.h203
-rwxr-xr-xdrivers/dsp/syslink/procmgr/processor.c398
-rwxr-xr-xdrivers/dsp/syslink/procmgr/processor.h84
-rwxr-xr-xdrivers/dsp/syslink/procmgr/procmgr.c957
-rwxr-xr-xdrivers/dsp/syslink/procmgr/procmgr_drv.c758
-rwxr-xr-xdrivers/dsp/syslink/procmgr/procmgr_drvdefs.h541
-rw-r--r--drivers/media/Kconfig2
-rw-r--r--drivers/media/video/Kconfig2
-rw-r--r--drivers/media/video/Makefile5
-rw-r--r--drivers/media/video/dmm/Kconfig6
-rw-r--r--drivers/media/video/dmm/Makefile3
-rw-r--r--drivers/media/video/dmm/dmm.c274
-rw-r--r--drivers/media/video/dmm/dmm.h128
-rw-r--r--drivers/media/video/dmm/dmm_mem.c307
-rw-r--r--drivers/media/video/dmm/dmm_mem.h30
-rw-r--r--drivers/media/video/omap/Kconfig9
-rw-r--r--drivers/media/video/omap/Makefile4
-rwxr-xr-xdrivers/media/video/omap/omap_vout.c3182
-rw-r--r--drivers/media/video/omap/omap_voutdef.h164
-rw-r--r--drivers/media/video/omap/omap_voutlib.c259
-rw-r--r--drivers/media/video/omap/omap_voutlib.h34
-rw-r--r--drivers/media/video/tiler/Kconfig6
-rw-r--r--drivers/media/video/tiler/Makefile4
-rw-r--r--drivers/media/video/tiler/tcm/Makefile3
-rw-r--r--drivers/media/video/tiler/tcm/tcm.c1770
-rw-r--r--drivers/media/video/tiler/tcm/tcm.h103
-rw-r--r--drivers/media/video/tiler/tcm/tcm_dbg.h61
-rw-r--r--drivers/media/video/tiler/tcm/tcm_pri.h87
-rw-r--r--drivers/media/video/tiler/tcm/tcm_utils.c261
-rw-r--r--drivers/media/video/tiler/tcm/tcm_utils.h29
-rw-r--r--drivers/media/video/tiler/tiler.c885
-rw-r--r--drivers/media/video/tiler/tiler.h116
-rw-r--r--drivers/media/video/tiler/tiler_def.h158
-rw-r--r--drivers/media/video/tiler/tiler_pack.c269
-rw-r--r--drivers/media/video/tiler/tiler_rot.c239
-rw-r--r--drivers/media/video/v4l2-ioctl.c24
-rw-r--r--drivers/mmc/core/core.c11
-rw-r--r--drivers/mmc/core/sdio.c34
-rw-r--r--drivers/mmc/core/sdio_bus.c11
-rw-r--r--drivers/mmc/host/omap_hsmmc.c41
-rw-r--r--drivers/regulator/twl-regulator.c162
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/serial/Kconfig27
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/omap-serial.c1323
-rw-r--r--drivers/usb/core/hcd.c4
-rw-r--r--drivers/video/omap2/Kconfig3
-rw-r--r--drivers/video/omap2/displays/Kconfig11
-rw-r--r--drivers/video/omap2/displays/Makefile2
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.c514
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.h281
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/displays/panel-taal.c253
-rw-r--r--drivers/video/omap2/dss/Kconfig15
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/dss/Makefile1
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/dss/core.c87
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/dss/dispc.c1829
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/dss/display.c30
-rw-r--r--drivers/video/omap2/dss/dpi.c191
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/dss/dsi.c2458
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/dss/dss.c130
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/dss/dss.h93
-rw-r--r--drivers/video/omap2/dss/hdmi.c1105
-rwxr-xr-xdrivers/video/omap2/dss/hdmi.h223
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/dss/manager.c148
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/dss/overlay.c153
-rw-r--r--drivers/video/omap2/dss/rfbi.c15
-rw-r--r--drivers/video/omap2/dss/sdi.c39
-rw-r--r--drivers/video/omap2/omapfb/Kconfig1
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c2
-rwxr-xr-x[-rw-r--r--]drivers/video/omap2/omapfb/omapfb-main.c247
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h2
-rw-r--r--drivers/video/omap2/vram.c4
-rw-r--r--include/linux/mmc/host.h13
-rw-r--r--include/linux/mmc/sdio_func.h10
-rw-r--r--include/linux/mmc/sdio_ids.h5
-rw-r--r--include/linux/omapfb.h5
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/videodev2.h15
-rw-r--r--include/media/v4l2-ioctl.h5
259 files changed, 63166 insertions, 1958 deletions
diff --git a/TI_OMAP4_Kernel_Release_Notes_L24.x.txt b/TI_OMAP4_Kernel_Release_Notes_L24.x.txt
new file mode 100755
index 000000000000..9e71822cdebc
--- /dev/null
+++ b/TI_OMAP4_Kernel_Release_Notes_L24.x.txt
@@ -0,0 +1,480 @@
+8 April 2010
+
+
+1. Introduction
+----------------
+This document accompanies OMAP(TM) Software Release L24.5 for Linux 2.6.33-rc2
+on OMAP4430 SDP. The document specifies:
+ - Instructions for unpacking the release
+ - New features and features the release supports
+ - Planned future features
+ - Postponed features
+
+
+2. Release Summary
+------------------
+This is a release of the Linux Baseport for OMAP4430. It supports the
+OMAP4 Wakeup SDP board. The kernel is based on Linux-omap version 2.6.33-rc2
+from open source. U-boot is based on open-source version 1.1.4.
+
+The u-boot source can be obtained via GIT from:
+ http://dev.omapzoom.org/?p=bootloader/u-boot.git;a=shortlog;h=refs/heads/omap4_dev
+
+The x-loader source can be obtained via GIT from:
+ http://dev.omapzoom.org/?p=bootloader/x-loader.git;a=shortlog;h=refs/heads/omap4_dev
+
+The kernel source can be obtained via GIT from:
+ http://dev.omapzoom.org/?p=integration/kernel-omap4.git;a=shortlog;h=refs/heads/L24.5
+
+Components that are supported in the release:
+ X-loader, U-Boot, OS Kernel (SMP), Phoenix power IC, UART, GP Timer, GPIO, Watchdog,
+ Neon, I2C, MMC/SD/eMMC (with ADMA support), Ethernet, RTC,
+ SDMA (including descriptor loading), Keypad, Touch screen, McSPI, McBSP, Mentor USB,
+ Phoenix General Purpose ADC, Battery Charging, Power Management Frameworks,
+ Audio (Phoenix Audio IC, ABE, AESS),
+ Display driver (TAAL based), Basic display (DSS2 migration, FB dev), Tiler memory manager,
+ WLAN support.
+
+
+3. Instructions
+----------------
+
+3.1 Board setup
+
+Please refer to the OMAP4430 SDP guide for instructions on setting up the OMAP4
+board.
+
+3.2 Compiling Images
+
+3.2.1 Compiling U-boot
+
+Set the environment variable PATH such that cross-compile binaries point to the
+needed tool chain. Refer to section 5 for tool chain information.
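+
+For example, assuming the Sourcery G++ Lite toolchain from section 5 has been
+unpacked under /opt/arm-2009q1 (the install path here is only an illustration),
+PATH could be set as:
+ # export PATH=/opt/arm-2009q1/bin:$PATH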
+
+To select the default configuration for U-Boot type:
+ # make CROSS_COMPILE=arm-none-linux-gnueabi- omap4430sdp_config
+
+To build the U-Boot image type:
+ # make CROSS_COMPILE=arm-none-linux-gnueabi-
+
+
+3.2.2 Compiling X-loader (for booting from external/removable MMC)
+
+Set the environment variable PATH such that cross-compile binaries point to the
+needed tool chain. Refer to section 5 for tool chain information.
+
+U-boot needs to be placed in a directory parallel to x-loader and compiled first.
+E.g.:
+ [DIR] omap4
+ +-- u-boot
+ +-- x-loader
+
+To select the default configuration for X-loader type:
+ # make CROSS_COMPILE=arm-none-linux-gnueabi- omap4430sdp_config
+
+To build the X-loader image type:
+ # make CROSS_COMPILE=arm-none-linux-gnueabi-
+ # make ift CROSS_COMPILE=arm-none-linux-gnueabi-
+
+The above step will create an MLO image, which can be copied onto the
+MMC card for booting via MMC.
+
+3.2.3 Compiling X-loader (for booting from eMMC)
+
+Follow the same steps as above to create an MLO image. A configuration header needs
+to be added at the beginning of this MLO to create an image 'x-load.ch.bin' that
+can be written to the eMMC for eMMC booting. Please contact your TI
+representative for obtaining the configuration header.
+
+3.2.4 Compiling the Kernel
+
+Set the environment variable PATH such that cross-compile binaries point to the
+needed tool chain. Refer to section 5 for tool chain information.
+
+The default configuration file for OMAP 4430 is present at
+arch/arm/configs/omap_4430sdp_defconfig.
+
+To work with the default configuration file, run the following command:
+ # make ARCH=arm CROSS_COMPILE=arm-none-linux-gnueabi- omap_4430sdp_defconfig
+
+Build the kernel with:
+ # make ARCH=arm CROSS_COMPILE=arm-none-linux-gnueabi- uImage
+
+NOTE: The above steps will create arch/arm/boot/uImage in the kernel directory
+ which is the binary used for booting.
+
+3.3 Flashing and Booting
+
+3.3.1 Booting from removable MMC
+
+Use the MLO (ref: section 3.2.2) and u-boot.bin (ref: section 3.2.1) binaries
+and follow the instructions at http://elinux.org/BeagleBoard#MMC.2FSD_boot
+to boot the board from removable MMC.
+
+Set the board switch settings as follows to boot the board from removable MMC:
+ S9 - 1-off, 2-on, 3-on, 4-on
+ S8 - 1-off, 2-on, 3-off, 4-on, 5-on, 6-on, 7-on, 8-on
+
+3.3.2 Flashing and booting from eMMC
+
+Follow the instructions below to save x-loader and u-boot binaries to eMMC and
+subsequently boot from eMMC.
+
+Get the x-loader and u-boot binaries (that you want to flash to eMMC) into RAM
+using one of the following options.
+
+Option 1:
+Use a debugger or OMAPFlasher tool to download x-loader (with configuration
+header, e.g. x-load.ch.bin) as well as u-boot.bin into
+SDRAM and run u-boot from SDRAM.
+
+Option 2:
+Boot via a removable MMC card following the instructions in section 3.3.1, but
+additionally keep the x-load.ch.bin that you want to flash to eMMC in the same
+removable MMC card. After booting from removable MMC, copy the x-load.ch.bin
+and u-boot.bin from the MMC to RAM using the following commands:
+OMAP44XX SDP # mmcinit 0
+OMAP44XX SDP # fatload mmc 0 [Ram Address X] x-load.ch.bin
+OMAP44XX SDP # fatload mmc 0 [Ram Address Y] u-boot.bin
+
+Once the x-loader and u-boot binaries are in RAM, use the following commands
+to copy them to eMMC.
+
+1) Erase all eMMC contents
+OMAP44XX SDP # mmcinit 1
+OMAP44XX SDP # mmc 1 erase 0x0 0x800000
+2) Flash x-loader
+OMAP44XX SDP # mmc 1 erase 0x100 [size of x-loader in hex]
+OMAP44XX SDP # mmc 1 write [Ram Address X] 0x100 [size of x-loader in hex]
+Note: [Ram Address X] is the address where the x-loader was downloaded into RAM using the
+debugger, OMAPFlasher or removable MMC.
+3) Flash u-boot.bin
+OMAP44XX SDP # mmc 1 erase 0x400 [size]
+OMAP44XX SDP # mmc 1 write [Ram Address Y] 0x400 [size]
+Note: [Ram Address Y] is the address where u-boot was downloaded into RAM using the
+debugger, OMAPFlasher or removable MMC.
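+
+As a purely illustrative example (the RAM addresses and sizes below are
+hypothetical; use the actual values for your binaries), assuming x-load.ch.bin
+was loaded at 0x80000000 with a size of 0x20000 and u-boot.bin at 0x80200000
+with a size of 0x40000, the full sequence would look like:
+OMAP44XX SDP # mmcinit 1
+OMAP44XX SDP # mmc 1 erase 0x0 0x800000
+OMAP44XX SDP # mmc 1 erase 0x100 0x20000
+OMAP44XX SDP # mmc 1 write 0x80000000 0x100 0x20000
+OMAP44XX SDP # mmc 1 erase 0x400 0x40000
+OMAP44XX SDP # mmc 1 write 0x80200000 0x400 0x40000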
+
+Set the board switch settings as follows to boot the board from eMMC:
+ S9 - 1-off, 2-on, 3-on, 4-on
+ S8 - 1-on, 2-on, 3-on, 4-on, 5-on, 6-off, 7-on, 8-on
+
+3.3.3 Using Filesystem from eMMC
+
+Use fdisk to create an ext2 partition (/dev/mmcblk0p2) in eMMC, leaving 2MB of
+free space at the start of the device.
+
+Use the following commands to flash the filesystem to the eMMC partition:
+1) Boot up with a known good kernel and filesystem from RAM
+2) Use a filesystem which has the USB gadget file-storage module g_filestorage.ko
+3) Connect the USB cable from the board to the PC
+4) Load the USB file-storage gadget module as:
+insmod g_filestorage.ko file=/dev/mmcblk0p2 stall=0 removable=1
+5) When the USB drive enumerates on the Linux PC, mount the drive
+6) Add the ext2 filesystem contents to the mounted drive
+7) Sync and then unmount the drive (see the sketch after this list)
+8) Add the following in bootargs "root=/dev/mmcblk0p2 rw rootdelay=1" to be able
+to use the eMMC-based FS as rootfs.
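+
+As referenced in step 7, a purely illustrative PC-side sequence for steps 5-7
+(the /dev/sdb device node, mount point and rootfs archive name are hypothetical
+and will differ on your PC) might look like:
+ # mkfs.ext2 /dev/sdb
+ # mount /dev/sdb /mnt/emmc
+ # tar -C /mnt/emmc -xf rootfs.tar
+ # sync
+ # umount /mnt/emmc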
+
+3.3.4 Bootargs
+
+Since UART3 is used as the console on the OMAP4 Wakeup board, you need to set
+'console=ttyS2,115200n8' in the bootargs.
+
+While using a filesystem from MMC or eMMC, you may use params such as:
+'root=/dev/mmcblk0p2 rw rootdelay=1'. The rootdelay is required because
+removable cards may take a few seconds to be detected.
+
+While using a filesystem via NFS, you may use params such as:
+'root=/dev/nfs rw nfsroot=<serverip>:<mount-partition>,nolock'
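+
+Putting these together, an illustrative complete bootargs setting from the
+u-boot prompt (for an eMMC-based rootfs) would be:
+OMAP44XX SDP # setenv bootargs console=ttyS2,115200n8 root=/dev/mmcblk0p2 rw rootdelay=1
+For an NFS rootfs, substitute the NFS parameters shown above for the
+root/rootdelay part.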
+
+Refer to section 3.4.2 for display-related bootargs options.
+
+For a detailed list and explanation of the various boot parameters, please refer to
+http://www.kernel.org/pub/linux/kernel/people/gregkh/lkn/lkn_pdf/ch09.pdf
+
+
+3.4 Using FB and V4L2 Display driver
+
+3.4.1 To enable secondary display
+
+Please use the following set of commands after kernel boot-up to switch the
+secondary display ON.
+
+1) Enable secondary display (display1)
+ echo "1" > /sys/devices/platform/omapdss/display1/enabled
+2) Disable overlay1
+ echo "0" > /sys/devices/platform/omapdss/overlay1/enabled
+3) Attach the secondary LCD manager to overlay1
+ echo "2lcd" > /sys/devices/platform/omapdss/overlay1/manager
+4) Enable overlay1
+ echo "1" > /sys/devices/platform/omapdss/overlay1/enabled
+
+3.4.2 Display specific bootargs options
+
+3.4.2.1 Using 1 FB and 3 V4L2 devices
+Add the following in bootargs "omapfb.numfb=1"
+
+3.4.2.2 Using 2 FB and 2 V4L2 devices (default option in L24.2 onwards)
+Add the following in bootargs "omapfb.numfb=2"
+
+3.4.2.3 Enabling DSS DEBUG prints
+Add the following in bootargs "omapdss.debug=1"
+
+3.4.3 Pico DLP support
+
+The OMAP4 device supports pico DLP and secondary LCD output on the secondary
+LCD manager (named '2lcd'). The configuration of the display
+happens in a non-conventional way.
+
+3.4.3.1 For building kernel image with Pico DLP support:
+ In menuconfig, the following "pico DLP" option needs to be set:
+ Device drivers ->
+ Graphics support ->
+ OMAP 2/3 Display subsystem support ->
+ omap 2/3 display drivers ->
+ <*> pico DLP
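+
+In the generated .config this menu entry corresponds to CONFIG_PANEL_PICO_DLP
+(left disabled in the default omap_4430sdp_defconfig), i.e. the resulting
+configuration should contain:
+ CONFIG_PANEL_PICO_DLP=y
+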
+3.4.3.2 After bootup:
+
+To configure output to either pico DLP or secondary LCD, set the overlay's
+manager to the secondary LCD manager:
+
+echo "0" > /sys/devices/platform/omapdss/overlay0/enabled
+echo "2lcd" > /sys/devices/platform/omapdss/overlay0/manager
+echo "1" > /sys/devices/platform/omapdss/overlay0/enabled
+
+3.4.3.3 Then enable the correct display to choose pico DLP or secondary LCD (default
+is secondary LCD). Assuming display1 is secondary LCD, and display3 is
+pico DLP:
+
+echo "0" > /sys/devices/platform/omapdss/display1/enabled
+# Set the secondary manager's display to pico DLP
+echo "pico_DLP" > /sys/devices/platform/omapdss/manager2/display
+echo "1" > /sys/devices/platform/omapdss/display3/enabled
+
+3.4.3.4 Now to switch back to the secondary LCD:
+echo "0" > /sys/devices/platform/omapdss/display3/enabled
+echo "2lcd" > /sys/devices/platform/omapdss/manager2/display
+echo "1" > /sys/devices/platform/omapdss/display1/enabled
+
+3.5 Enabling Power Management Features
+
+All PM features are disabled in the default OMAP4 kernel configuration
+(omap_4430sdp_defconfig).
+All clocks are still kept enabled by the bootloader.
+
+To test PM features, please use omap4_pm_defconfig. Note that PM features
+on OMAP4430 ES1.0 can only be verified on an EMU device. The EMU device needs
+the x-loader signed with the right PPA release.
+
+omap4_pm_defconfig does not enable tick suppression. To do so, enable these
+options manually.
+
+Enable the following options in menuconfig:
+ Kernel Features ---> Use local timer interrupts
+ Kernel Features ---> Tickless System
+ Kernel Features ---> High Resolution Timer support
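+
+For reference, these menu entries correspond to the following .config symbols in
+this kernel version (to be verified against your menuconfig):
+ CONFIG_LOCAL_TIMERS=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y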
+
+omap4_pm_defconfig has VFP support disabled due to a known issue with enabling
+FPU on ES1. Please do not enable VFP support.
+
+3.5.1 CPU Hotplug
+
+To hotplug out CPU1, use the following command:
+
+echo 0 > /sys/devices/system/cpu/cpu1/online
+
+To bring CPU1 back online, use the following command:
+
+echo 1 > /sys/devices/system/cpu/cpu1/online
+
+Note that CPU0 cannot be offlined due to a hardware limitation.
+Currently CPU1 transitions to the CSWR state when offlined. This can
+only be verified using OBS signals.
+
+3.5.2 CPUIdle
+
+To test CPUidle, CPU1 must first be hotplugged out.
+Use the following command to hotplug out CPU1:
+echo 0 > /sys/devices/system/cpu/cpu1/online
+
+Once CPU1 is hotplugged out, OBS signals can be used to
+verify MPU/CPU0/CPU1 transitions.
+
+Currently only the MPU CSWR sleep state is supported.
+
+3.5.3 System Suspend
+
+To test system suspend, use the following command
+
+echo mem > /sys/power/state
+
+Use OBS signals to verify MPU/CPU0/CPU1 transitions.
+
+Currently only the MPU CSWR sleep state is supported.
+
+Please use a ramdisk in order to test the system suspend feature.
+
+3.6 Using Audio Driver
+
+Include the asound.conf file in the /etc directory of your filesystem. It maps the
+different audio devices.
+
+Usage:
+aplay -D mm file.wav
+aplay -D tones file.wav
+aplay -D vx file.wav
+aplay -D hd_audio file.wav
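+
+The device names used above (mm, tones, vx, hd_audio) are aliases defined in
+asound.conf. As a rough sketch of the ALSA configuration syntax involved (the
+actual card/device mapping is defined by the asound.conf shipped with the
+release, not by this example), one such alias could look like:
+pcm.mm {
+    type plug
+    slave.pcm "hw:0,0"
+}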
+
+
+
+4. Features
+-----------
+
+4.1 New Features
+
+- Power Management Frameworks
+ - Clock framework
+ - Clockdomain framework
+ - Powerdomain framework
+ - Regulator framework
+ - CPUidle with support up to MPU CSWR
+ - System-wide suspend with support up to MPU CSWR
+ - CPU1 hotplug with support up to CPU1 CSWR
+NOTE:
+ - All PM features are disabled in the default OMAP4 kernel configuration.
+ - All clocks are still kept enabled by the bootloader.
+ - These features can only be validated on EMU devices with omap4_pm_defconfig.
+
+- Audio
+ - Audio Playback to Phoenix Earphone
+ - Audio Playback using Tones port
+ - ALSA controls for ABE mixers
+ - Add McPDM power management support
+
+4.2 Supported Features
+
+- Boot-loader:
+ X-loader with MMC/eMMC/SD support
+ U-boot with USB, MMC/eMMC/SD and Ethernet support
+
+- OS Kernel
+ OS Kernel (SMP)
+ Phoenix power IC,
+ UART,
+ GP Timer,
+ GPIO,
+ Watchdog,
+ NEON,
+ I2C,
+ MMC/SD/eMMC (with ADMA support),
+ Ethernet,
+ RTC,
+ SDMA (including descriptor loading),
+ Keypad,
+ Touch screen,
+ McSPI,
+ McBSP,
+ Mentor USB,
+ Phoenix General Purpose ADC,
+ Battery Charging.
+
+- Audio
+ Audio playback to Phoenix Hands-free and Headset output devices.
+ HDMI audio playback
+ Audio record through Phoenix analog MIC input.
+ Simultaneous audio playback and capture.
+
+- Video
+ Display driver
+ - FBdev and V4L2 API support
+ - Primary and Secondary DSI and HDMI displays
+ - Simultaneous rendering on all 3 displays
+ Tiler memory manager.
+
+- WLAN
+
+4.3 Postponed Features
+
+None
+
+4.4 Future Planned Features
+
+Refer to Program schedule.
+
+4.5 Defects Fixed in This Release
+
+OMAPS00214290 MUSB Tx throughput degrades with Poky FS accessed over NFS.
+OMAPS00216452 Fastboot devices are not detected if USB cable is connected after boot up
+OMAPS00214706 Adapt DPI to support CHANNEL LCD2 along with CHANNEL LCD in OMAP4
+OMAPS00216009 Call to display->update call is missing in function pan display
+OMAPS00215432 CONFIG_PANEL_4430SDP_TAAL lacks config dependency
+OMAPS00215230 1680 *1050 resolution DVI monitor gives SYNC LOST
+OMAPS00213860 DQUE ioctl() takes ~40-50ms although buffer is available.
+OMAPS00213905 Dsi framedone timeout seen when switching from HDMI to primary display
+OMAPS00213910 tearing effect seen with alternate frames being colored
+OMAPS00213912 DSI lcd display does refresh at 40fps instead of 60fps
+OMAPS00214176 Remove the mux configuration for dsi1, dsi2, hdmi
+OMAPS00214175 Move GPIO and twl config to board file for dsi1, dsi2, hdmi
+OMAPS00216009 Call to display->update call is missing in function omapfb_pan_display
+OMAPS00212907 ALSA Record
+
+Known Issues:
+OMAPS00216391 With tearing effect patch, frame drops from 60fps to 30fps
+OMAPS00216592 There is no way available for dynamically setting/resetting dithering bit
+OMAPS00213907 With omapfb.numfb=1 in bootargs, the rest of the pipelines are not given to v4l2
+OMAPS00214116 Visible tearing during the execution of a 3D app with flipping enabled.
+OMAPS00215432 CONFIG_PANEL_4430SDP_TAAL lacks config dependency on DSS_DSI
+OMAPS00216002 DSS function default_wait_vsync doesn't distinguish between primary and secondary LCD panels
+OMAPS00216444 Provide KConfig options for DSI2, Sec Taal panel, and all DSI specific knobs
+OMAPS00216589 VGA Rotation on LCD does not work
+OMAPS00216585 V4L2 Rotation ioctls rotate 90 degrees when set to 270 degrees and vice versa
+OMAPS00216582 Streaming test case gives issue when multiple buffers are used in the videobuf queue
+
+4.6 Open Defects
+
+OMAPS00214020 ethernet doesn't get configured with L24.4 kernel
+OMAPS00214519 After boot SATO image and Calibrate Touchscreen, if the screen is touched X-server crashes.
+OMAPS00215651 MUSB driver in host mode has problems with unaligned DMA
+OMAPS00215668 MUSB driver in device mode has problems with unaligned DMA.
+OMAPS00216393 DMA testcases should declare set_test_passed(1) only during rmmod
+
+
+4.7 Open Change Requests
+
+None
+
+4.8 Rejected Defects
+
+None
+
+4.9 Postponed Defects
+
+None
+
+4.10 Limitations
+
+1) At the u-boot level, the saveenv command doesn't work.
+2) Reading the MAC address from the ethernet EEPROM is not supported.
+
+
+5. Tool Chain
+-------------
+The toolchain used to build the code can be obtained from CodeSourcery at the
+following URL:
+ http://www.codesourcery.com/sgpp/lite/arm/portal/release858
+
+The tool chain version is Sourcery G++ Lite 2009q1-203 for ARM GNU/Linux.
+
+The tool chain requires glibc 2.3.0 or higher to compile the source code on
+the host machine.
+
+
+--------------------------------------------------------------------------------
+
+OMAP(TM) is a Trademark of Texas Instruments Incorporated
+Innovator(TM) is a Trademark of Texas Instruments Incorporated
+Code Composer Studio(TM) is a Trademark of Texas Instruments Incorporated
+DSP/BIOS(TM) is a Trademark of Texas Instruments Incorporated
+
+All other trademarks are the property of the respective owner.
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 03d7519a75fa..9f6e9b58bf96 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1268,6 +1268,25 @@ config UACCESS_WITH_MEMCPY
However, if the CPU data cache is using a write-allocate mode,
this option is unlikely to provide any performance gain.
+config UNOFFICIAL_USER_DMA_API
+ bool "Enable unofficial user DMA API (READ HELP TEXT)"
+ depends on EXPERIMENTAL
+ help
+ This option enables the exposure of the kernel's three DMA cache
+ coherence functions to userspace via three ARM private syscalls.
+
+ This API is not officially supported; it is a stop gap measure
+ to allow developers to achieve their goals. It doesn't take
+ account of any DMA restrictions which may be in the system, and
+ makes no attempt to work around those.
+
+ The user is entirely responsible for coordinating the use of this
+ API with DMA activity and CPU snooping activity. Improper use
+ of this API can result in random data corruption, especially if
+ the memory contains DMA scatterlists.
+
+ Use of this API will taint the kernel.
+
endmenu
menu "Boot options"
diff --git a/arch/arm/configs/omap4_pm_defconfig b/arch/arm/configs/omap4_pm_defconfig
index 32975a35d6c7..0e323ca1060f 100644
--- a/arch/arm/configs/omap4_pm_defconfig
+++ b/arch/arm/configs/omap4_pm_defconfig
@@ -835,6 +835,7 @@ CONFIG_REGULATOR_TWL4030=y
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
# CONFIG_FB is not set
+# CONFIG_OMAP2_DSS is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -999,6 +1000,9 @@ CONFIG_RTC_DRV_TWL4030=y
# TI VLYNQ
#
# CONFIG_STAGING is not set
+CONFIG_DMM_OMAP=y
+CONFIG_TILER_OMAP=y
+# CONFIG_Sys_Link is not set
#
# File systems
diff --git a/arch/arm/configs/omap_4430sdp_defconfig b/arch/arm/configs/omap_4430sdp_defconfig
index 43a81373027f..11d04492a6ae 100644
--- a/arch/arm/configs/omap_4430sdp_defconfig
+++ b/arch/arm/configs/omap_4430sdp_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32
-# Sun Dec 6 23:37:45 2009
+# Linux kernel version: 2.6.33-rc2
+# Fri Apr 9 12:18:37 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -22,6 +22,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -36,14 +37,18 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
CONFIG_BSD_PROCESS_ACCT=y
# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
#
# RCU Subsystem
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -127,14 +132,41 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
# CONFIG_FREEZER is not set
#
@@ -163,6 +195,7 @@ CONFIG_MMU=y
# CONFIG_ARCH_IXP2000 is not set
# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
# CONFIG_ARCH_KIRKWOOD is not set
# CONFIG_ARCH_LOKI is not set
# CONFIG_ARCH_MV78XX0 is not set
@@ -185,6 +218,7 @@ CONFIG_MMU=y
# CONFIG_ARCH_DAVINCI is not set
CONFIG_ARCH_OMAP=y
# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_U8500 is not set
#
# TI OMAP Implementations
@@ -200,7 +234,8 @@ CONFIG_ARCH_OMAP4=y
# CONFIG_OMAP_RESET_CLOCKS is not set
# CONFIG_OMAP_MUX is not set
CONFIG_OMAP_MCBSP=y
-# CONFIG_OMAP_MBOX_FWK is not set
+CONFIG_OMAP_MBOX_FWK=y
+CONFIG_OMAP_IOMMU=y
# CONFIG_OMAP_MPU_TIMER is not set
CONFIG_OMAP_32K_TIMER=y
CONFIG_OMAP_32K_TIMER_HZ=128
@@ -215,6 +250,8 @@ CONFIG_OMAP_PM_NOOP=y
#
# OMAP Board Type
#
+# CONFIG_WIFI_CONTROL_FUNC is not set
+# CONFIG_TIWLAN_SDIO is not set
CONFIG_MACH_OMAP_4430SDP=y
#
@@ -232,6 +269,7 @@ CONFIG_CPU_TLB_V7=y
CONFIG_CPU_HAS_ASID=y
CONFIG_CPU_CP15=y
CONFIG_CPU_CP15_MMU=y
+# CONFIG_CPU_USE_DOMAINS is not set
#
# Processor Features
@@ -244,12 +282,15 @@ CONFIG_CPU_CP15_MMU=y
CONFIG_HAS_TLS_REG=y
CONFIG_OUTER_CACHE=y
CONFIG_CACHE_L2X0=y
+CONFIG_CACHE_PL310=y
CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARCH_HAS_BARRIERS=y
# CONFIG_ARM_ERRATA_430973 is not set
# CONFIG_ARM_ERRATA_458693 is not set
# CONFIG_ARM_ERRATA_460075 is not set
CONFIG_PL310_ERRATA_588369=y
CONFIG_ARM_GIC=y
+CONFIG_COMMON_CLKDEV=y
#
# Bus support
@@ -294,13 +335,12 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_LEDS is not set
CONFIG_ALIGNMENT_TRAP=y
# CONFIG_UACCESS_WITH_MEMCPY is not set
+CONFIG_UNOFFICIAL_USER_DMA_API=y
#
# Boot options
@@ -352,27 +392,76 @@ CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
CONFIG_IP_FIB_HASH=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
CONFIG_INET_XFRM_MODE_TRANSPORT=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_INET_XFRM_MODE_BEET=y
CONFIG_INET_LRO=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -389,17 +478,24 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
# CONFIG_MTD is not set
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=16384
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
# CONFIG_MG_DISK is not set
# CONFIG_MISC_DEVICES is not set
CONFIG_HAVE_IDE=y
@@ -415,12 +511,48 @@ CONFIG_HAVE_IDE=y
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
CONFIG_KS8851=y
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
#
@@ -429,6 +561,7 @@ CONFIG_KS8851=y
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
@@ -442,12 +575,46 @@ CONFIG_INPUT_EVDEV=y
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
CONFIG_KEYBOARD_OMAP=y
+# CONFIG_KEYBOARD_TWL4030 is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
CONFIG_TOUCHSCREEN_SYNTM12XX=y
# CONFIG_INPUT_MISC is not set
@@ -457,6 +624,8 @@ CONFIG_TOUCHSCREEN_SYNTM12XX=y
CONFIG_SERIO=y
CONFIG_SERIO_SERPORT=y
CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
# CONFIG_GAMEPORT is not set
#
@@ -486,8 +655,10 @@ CONFIG_SERIAL_8250_RSA=y
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_OMAP is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -499,6 +670,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -514,9 +686,46 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_OMAP=y
# CONFIG_I2C_SIMTEC is not set
-CONFIG_SPI_OMAP24XX=y
-CONFIG_SPI_MASTER=y
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_OMAP24XX=y
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
#
# PPS support
#
@@ -533,6 +742,10 @@ CONFIG_GPIOLIB=y
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TWL4030 is not set
#
# PCI GPIO expanders:
@@ -541,16 +754,33 @@ CONFIG_GPIOLIB=y
#
# SPI GPIO expanders:
#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
#
# AC97 GPIO expanders:
#
# CONFIG_W1 is not set
CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_BATTERY_DS2760 is not set
+# CONFIG_BATTERY_DS2782 is not set
+CONFIG_TWL6030_BCI_BATTERY=y
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
CONFIG_OMAP_WATCHDOG=y
+# CONFIG_TWL4030_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
#
@@ -566,21 +796,155 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_ASIC3 is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
CONFIG_TWL4030_CORE=y
+# CONFIG_TWL4030_POWER is not set
+# CONFIG_TWL4030_CODEC is not set
+CONFIG_TWL6030_GPADC=y
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_T7L66XB is not set
# CONFIG_MFD_TC6387XB is not set
# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_88PM8607 is not set
+# CONFIG_AB4500_CORE is not set
CONFIG_REGULATOR=y
-# CONFIG_MEDIA_SUPPORT is not set
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_COMMON=y
+# CONFIG_VIDEO_ALLOW_V4L1 is not set
+CONFIG_VIDEO_V4L1_COMPAT=y
+# CONFIG_DVB_CORE is not set
+CONFIG_VIDEO_MEDIA=y
+
+#
+# Multimedia drivers
+#
+CONFIG_IR_CORE=y
+CONFIG_VIDEO_IR=y
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=y
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=y
+CONFIG_MEDIA_TUNER_TDA8290=y
+CONFIG_MEDIA_TUNER_TDA9887=y
+CONFIG_MEDIA_TUNER_TEA5761=y
+CONFIG_MEDIA_TUNER_TEA5767=y
+CONFIG_MEDIA_TUNER_MT20XX=y
+CONFIG_MEDIA_TUNER_XC2028=y
+CONFIG_MEDIA_TUNER_XC5000=y
+CONFIG_MEDIA_TUNER_MC44S803=y
+CONFIG_VIDEO_V4L2=y
+CONFIG_VIDEOBUF_GEN=y
+CONFIG_VIDEOBUF_DMA_SG=y
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_VIDEO_IR_I2C=y
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+CONFIG_VIDEO_OMAP3_OUT=y
+# CONFIG_SOC_CAMERA is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_I2C_SI4713 is not set
+# CONFIG_RADIO_SI4713 is not set
+# CONFIG_RADIO_SI470X is not set
+# CONFIG_RADIO_TEA5764 is not set
+# CONFIG_RADIO_TEF6862 is not set
+# CONFIG_DAB is not set
#
# Graphics support
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=0
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+# CONFIG_OMAP2_DSS_VENC is not set
+CONFIG_OMAP2_DSS_HDMI=y
+CONFIG_OMAP2_DSS_DSI=y
+# CONFIG_OMAP2_DSS_USE_DSI_PLL is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE=y
+CONFIG_FB_OMAP2_NUM_FBS=2
+
+#
+# OMAP2/3 Display Device Drivers
+#
+# CONFIG_PANEL_GENERIC is not set
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+# CONFIG_PANEL_PICO_DLP is not set
+CONFIG_PANEL_TAAL=y
+# CONFIG_LCD_4430SDP is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+# CONFIG_LCD_PLATFORM is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
#
# Display device support
@@ -592,6 +956,24 @@ CONFIG_REGULATOR=y
#
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
CONFIG_SOUND=y
# CONFIG_SOUND_OSS_CORE is not set
CONFIG_SND=y
@@ -620,12 +1002,13 @@ CONFIG_SND_ARM=y
CONFIG_SND_SPI=y
CONFIG_SND_SOC=y
CONFIG_SND_OMAP_SOC=y
-CONFIG_SND_OMAP_SOC_MCPDM=y
+CONFIG_OMAP_MCPDM=y
+CONFIG_SND_OMAP_SOC_ABE=y
CONFIG_SND_OMAP_SOC_SDP4430=y
CONFIG_SND_OMAP_SOC_HDMI=y
CONFIG_SND_SOC_I2C_AND_SPI=y
# CONFIG_SND_SOC_ALL_CODECS is not set
-CONFIG_SND_SOC_TWL6040=y
+CONFIG_SND_SOC_ABE_TWL6040=y
# CONFIG_SOUND_PRIME is not set
# CONFIG_HID_SUPPORT is not set
CONFIG_USB_SUPPORT=y
@@ -697,6 +1080,7 @@ CONFIG_USB_G_SERIAL=m
CONFIG_USB_OTG_UTILS=y
# CONFIG_USB_GPIO_VBUS is not set
# CONFIG_USB_ULPI is not set
+# CONFIG_TWL4030_USB is not set
CONFIG_NOP_USB_XCEIV=y
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
@@ -716,6 +1100,8 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_MMC_SDHCI is not set
# CONFIG_MMC_OMAP is not set
CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@@ -724,9 +1110,7 @@ CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
-# CONFIG_DMADEVICES is not set
-# CONFIG_AUXDISPLAY is not set
-# CONFIG_UIO is not set
+# CONFIG_RTC_DEBUG is not set
#
# RTC interfaces
@@ -734,19 +1118,79 @@ CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
CONFIG_RTC_INTF_SYSFS=y
CONFIG_RTC_INTF_PROC=y
CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
-# I2C TWL drivers
+# SPI RTC drivers
#
-CONFIG_RTC_DRV_TWL4030=y
-CONFIG_REGULATOR_TWL4030=y
-CONFIG_TWL6030_GPADC=y
-CONFIG_TWL6030_BCI_BATTERY=y
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
#
# TI VLYNQ
#
# CONFIG_STAGING is not set
+CONFIG_DMM_OMAP=y
+CONFIG_TILER_OMAP=y
+CONFIG_Sys_Link=y
+CONFIG_SYSLINK_PROC=y
+CONFIG_SYSLINK_PROC4430=y
+CONFIG_MPU_BRIDGE_NOTIFY=y
+CONFIG_NOTIFY_DUCATI=y
+CONFIG_MPU_SYSLINK_IPC=y
+CONFIG_SYSLINK_USE_SYSMGR=y
+CONFIG_OMAP_IOMMU_DEBUG_MODULE=y
#
# File systems
@@ -764,6 +1208,7 @@ CONFIG_JBD=y
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
@@ -772,6 +1217,7 @@ CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
CONFIG_PRINT_QUOTA_WARNING=y
CONFIG_QUOTA_TREE=y
# CONFIG_QFMT_V1 is not set
@@ -836,7 +1282,9 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_NFS_ACL_SUPPORT=y
@@ -844,6 +1292,12 @@ CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
#
# Partition Types
@@ -906,6 +1360,7 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
#
# Kernel hacking
@@ -960,13 +1415,11 @@ CONFIG_FRAME_POINTER=y
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
-# CONFIG_BRANCH_PROFILE_NONE is not set
-# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
-# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
@@ -975,6 +1428,7 @@ CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_DEBUG_ERRORS is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUG_LL is not set
+# CONFIG_OC_ETM is not set
#
# Security options
@@ -982,7 +1436,11 @@ CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
@@ -1099,3 +1557,4 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/omap_4430simulator_defconfig b/arch/arm/configs/omap_4430simulator_defconfig
new file mode 100644
index 000000000000..6274bb1c53e0
--- /dev/null
+++ b/arch/arm/configs/omap_4430simulator_defconfig
@@ -0,0 +1,1017 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.31-rc9
+# Thu Oct 1 15:58:54 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Performance Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+
+#
+# TI OMAP Implementations
+#
+# CONFIG_ARCH_OMAP1 is not set
+# CONFIG_ARCH_OMAP2 is not set
+# CONFIG_ARCH_OMAP3 is not set
+CONFIG_ARCH_OMAP4=y
+
+#
+# OMAP Feature Selections
+#
+# CONFIG_OMAP_RESET_CLOCKS is not set
+CONFIG_OMAP_MUX=y
+CONFIG_OMAP_MCBSP=y
+CONFIG_OMAP_MBOX_FWK=y
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+CONFIG_OMAP_LL_DEBUG_UART1=y
+# CONFIG_OMAP_LL_DEBUG_UART2 is not set
+# CONFIG_OMAP_LL_DEBUG_UART3 is not set
+
+#
+# OMAP Board Type
+#
+CONFIG_MACH_OMAP_4430SDP=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_IFAR=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_ARM_THUMBEE is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+CONFIG_ARM_ERRATA_484863=y
+
+CONFIG_ARM_GIC=y
+# CONFIG_OMAP_L2_EVENT_DEBUG is not set
+CONFIG_OMAP4_SUDO_ROMCODE=y
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SMP=y
+CONFIG_HAVE_ARM_SCU=y
+CONFIG_HAVE_ARM_TWD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_NR_CPUS=2
+# CONFIG_LOCAL_TIMERS is not set
+# CONFIG_HOTPLUG_CPU is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=128
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/ram0 rw mem=128M console=ttyS0,115200n8 initrd=0x81600000,20M ramdisk_size=20480 loglevel=1"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_HAVE_AOUT=y
+CONFIG_BINFMT_AOUT=y
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_OMAP=y
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_SPI=y
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+CONFIG_I2C_OMAP=y
+CONFIG_SPI_OMAP24XX=y
+CONFIG_SPI_MASTER=y
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_OMAP_WATCHDOG=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_MEDIA_SUPPORT is not set
+CONFIG_TWL6030_CORE=y
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+CONFIG_DAB=y
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_OMAP=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SOC=y
+
+#
+# OMAP 44xx high speed USB support
+#
+# CONFIG_USB_MUSB_HOST is not set
+CONFIG_USB_MUSB_PERIPHERAL=y
+# CONFIG_USB_MUSB_OTG is not set
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_MUSB_PIO_ONLY=y
+CONFIG_USB_MUSB_DEBUG=y
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_NEW_LEDS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_TWL=y
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+CONFIG_REGULATOR_TWL=y
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+CONFIG_CRC_T10DIF=y
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index ef670718c4aa..2b37bf90866d 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -183,6 +183,21 @@
* DMA Cache Coherency
* ===================
*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -202,6 +217,8 @@ struct cpu_cache_fns {
void (*dma_map_area)(const void *, size_t, int);
void (*dma_unmap_area)(const void *, size_t, int);
+ void (*dma_inv_range)(const void *, const void *);
+ void (*dma_clean_range)(const void *, const void *);
void (*dma_flush_range)(const void *, const void *);
};
@@ -227,6 +244,8 @@ extern struct cpu_cache_fns cpu_cache;
*/
#define dmac_map_area cpu_cache.dma_map_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
+#define dmac_inv_range cpu_cache.dma_inv_range
+#define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -253,10 +272,14 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
*/
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
+#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
+#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);
#endif
@@ -376,6 +399,10 @@ static inline void __flush_icache_all(void)
#ifdef CONFIG_ARM_ERRATA_411920
extern void v6_icache_inval_all(void);
v6_icache_inval_all();
+#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
+ asm("mcr p15, 0, %0, c7, c1, 0 @ invalidate I-cache inner shareable\n"
+ :
+ : "r" (0));
#else
asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
:
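
Note: the comment block added above spells out the contract for the two new per-CPU DMA cache hooks: dma_clean_range() writes dirty lines back before a device reads a buffer, and dma_inv_range() discards stale lines before the CPU reads data a device has written. A minimal sketch of that discipline, assuming a driver already holds a kernel-virtual buffer; the wrapper names below are illustrative and not part of this patch:

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Illustrative only: CPU -> device. Write dirty lines back so the device
 * sees the CPU's data before it starts reading the buffer. */
static void example_sync_for_device(const void *buf, size_t len)
{
        dmac_clean_range(buf, (const char *)buf + len);
}

/* Illustrative only: device -> CPU. Discard possibly stale cached lines
 * so subsequent CPU reads fetch what the device wrote to memory. */
static void example_sync_for_cpu(const void *buf, size_t len)
{
        dmac_inv_range(buf, (const char *)buf + len);
}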
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d2a59cfc30ce..884726efef33 100644..100755
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -72,6 +72,9 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
*/
extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem * __arm_multi_strided_ioremap(int, unsigned long *,
+ size_t *, unsigned long *, unsigned long *, unsigned int);
+
extern void __iounmap(volatile void __iomem *addr);
/*
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c2f1605de359..a57ec73da99b 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -46,6 +46,9 @@
#define TLB_V7_UIS_FULL (1 << 20)
#define TLB_V7_UIS_ASID (1 << 21)
+/* Inner Shareable BTB operation (ARMv7 MP extensions) */
+#define TLB_V7_IS_BTB (1 << 22)
+
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
@@ -183,7 +186,7 @@
#endif
#ifdef CONFIG_SMP
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
#else
#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
@@ -339,6 +342,12 @@ static inline void local_flush_tlb_all(void)
dsb();
isb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -376,6 +385,12 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
dsb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void
@@ -416,6 +431,12 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
dsb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -454,6 +475,12 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
dsb();
isb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
/*
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 4e506d09e5f9..1298ea98909b 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -406,7 +406,17 @@
* *NOTE*: This is a ghost syscall private to the kernel. Only the
* __kuser_cmpxchg code in entry-armv.S should be aware of its
* existence. Don't ever use this from user code.
+ *
+ * These are temporary interfaces; they are a stop gap until we get
+ * a proper solution to DMA. These won't always work for every
+ * device. Only use these IF you *really* know what you're doing.
+ * Don't be surprised if they go away in later kernels.
+ *
*/
+#define __ARM_NR_temp_dma_inv_range (__ARM_NR_BASE+0x0007fd)
+#define __ARM_NR_temp_dma_clean_range (__ARM_NR_BASE+0x0007fe)
+#define __ARM_NR_temp_dma_flush_range (__ARM_NR_BASE+0x0007ff)
+
#ifdef __KERNEL__
#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
#endif
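
Note: these three private syscall numbers are dispatched in arch/arm/kernel/traps.c (see the hunk below) to temp_user_dma_op() with r0 as the start address and r1 as the end address. From user space they would be reached through the generic syscall() entry point; a rough sketch, assuming an EABI build where the ARM private syscall base resolves to 0x0f0000 (check the target's <asm/unistd.h> before relying on this):

#include <unistd.h>
#include <sys/syscall.h>

#define ARM_NR_BASE                     0x0f0000
#define ARM_NR_temp_dma_flush_range     (ARM_NR_BASE + 0x0007ff)

/* Illustrative only: clean+invalidate a user VA range around a DMA transfer.
 * Returns 0 on success, -1 with errno set otherwise. */
static int temp_dma_flush_range(void *start, void *end)
{
        return syscall(ARM_NR_temp_dma_flush_range, start, end);
}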
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 97dc142898e5..dd2b679c3dd6 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -102,6 +102,7 @@ struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
+EXPORT_SYMBOL(outer_cache);
#endif
struct stack {
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d43a5b17dcdf..2a42d220dcc5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -219,9 +219,6 @@ void __ref cpu_die(void)
{
unsigned int cpu = smp_processor_id();
- /* FIXME: BUG: scheduling while atomic: swapper/0/0x00000002 */
- preempt_enable();
-
local_irq_disable();
idle_task_exit();
@@ -229,6 +226,7 @@ void __ref cpu_die(void)
* actual CPU shutdown procedure is at least platform (if not
* CPU) specific
*/
+ preempt_enable_no_resched();
platform_cpu_die(cpu);
/*
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 23d76738ff7b..10098ed778e5 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -422,7 +422,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
return regs->ARM_r0;
}
-static inline void
+static void
do_cache_op(unsigned long start, unsigned long end, int flags)
{
struct mm_struct *mm = current->active_mm;
@@ -517,6 +517,22 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
#endif
return 0;
+#ifdef CONFIG_UNOFFICIAL_USER_DMA_API
+ /*
+ * These are temporary interfaces; they are a stop gap until we get
+ * a proper solution to DMA. These won't always work for every
+ * device. Only use these IF you *really* know what you're doing.
+ * Don't be surprised if they go away in later kernels.
+ */
+ case NR(temp_dma_inv_range):
+ case NR(temp_dma_clean_range):
+ case NR(temp_dma_flush_range):
+ {
+ extern int temp_user_dma_op(unsigned long, unsigned long, int);
+ return temp_user_dma_op(regs->ARM_r0, regs->ARM_r1, no & 3);
+ }
+#endif
+
#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
/*
* Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 10eafa70a909..cb446cb35468 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -81,6 +81,32 @@ config MACH_OMAP3EVM
bool "OMAP 3530 EVM board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+config WIFI_CONTROL_FUNC
+ bool "Enable WiFi control function abstraction"
+ depends on MACH_OMAP_4430SDP
+ select WIRELESS_EXT
+ select WEXT_CORE
+ select WEXT_PROC
+ select WEXT_PRIV
+ default y
+ help
+ Enables Power/Reset/Carddetect function abstraction
+config TIWLAN_SDIO
+ bool "TI WLAN Enhanced SDIO Contoller support"
+ depends on MMC_OMAP || MMC_OMAP_MODULE || MMC_OMAP_HS || MMC_OMAP_HS_MODULE
+ help
+ Say Y here if you want to be able to use TI's WLAN device using the
+ SDIO interface. If unsure, say N.
+config TIWLAN_MMC_CONTROLLER
+ int "MMC Controller number that TI WLAN chip is connected to"
+ range 1 5
+ depends on TIWLAN_SDIO
+ default "5"
+ help
+ Choose the number of the MMC controller that the TI WLAN chip is
+ connected to. The TI WLAN chip has an SDIO host controller that
+ will control this MMC port.
+
config MACH_OMAP3517EVM
bool "OMAP3517/ AM3517 EVM board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 4aa8f128d9f0..ee6f5ce08b40 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -49,8 +49,7 @@ endif
# PRCM
obj-$(CONFIG_ARCH_OMAP2) += cm.o
obj-$(CONFIG_ARCH_OMAP3) += cm.o
-obj-$(CONFIG_ARCH_OMAP4) += cm4xxx.o voltage.o opp4xxx.o \
- cpuidle44xx.o pm44xx.o sleep44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += cm4xxx.o voltage.o opp4xxx.o cpuidle44xx.o pm44xx.o
# Clock framework
obj-$(CONFIG_ARCH_OMAP2) += clock2xxx.o clock2xxx_data.o
@@ -67,7 +66,7 @@ mailbox_mach-objs := mailbox.o
iommu-y += iommu2.o
iommu-$(CONFIG_ARCH_OMAP3) += omap3-iommu.o
-
+iommu-$(CONFIG_ARCH_OMAP4) += omap4-iommu.o
obj-$(CONFIG_OMAP_IOMMU) += $(iommu-y)
i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o
@@ -115,6 +114,7 @@ obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \
mmc-twl4030.o
obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o \
mmc-twl4030.o
+obj-$(CONFIG_TIWLAN_SDIO) += board-4430sdp-wifi.o
obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o
diff --git a/arch/arm/mach-omap2/board-4430sdp-wifi.c b/arch/arm/mach-omap2/board-4430sdp-wifi.c
new file mode 100755
index 000000000000..b13e9fc3767a
--- /dev/null
+++ b/arch/arm/mach-omap2/board-4430sdp-wifi.c
@@ -0,0 +1,138 @@
+/*
+ * Board support file containing WiFi-specific details for the OMAP4430 SDP.
+ *
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * Author: Pradeep Gurumath <pradeepgurumath@ti.com>
+ *
+ * Based on mach-omap2/board-3430sdp.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* linux/arch/arm/mach-omap2/board-4430sdp-wifi.c
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/err.h>
+
+#include <asm/gpio.h>
+#include <asm/io.h>
+#include <plat/wifi_tiwlan.h>
+
+#define SDP4430_WIFI_PMENA_GPIO 54
+#define SDP4430_WIFI_IRQ_GPIO 53
+
+static int sdp4430_wifi_cd; /* WIFI virtual 'card detect' status */
+static void (*wifi_status_cb)(int card_present, void *dev_id);
+static void *wifi_status_cb_devid;
+
+int omap_wifi_status_register(void (*callback)(int card_present,
+ void *dev_id), void *dev_id)
+{
+ if (wifi_status_cb)
+ return -EAGAIN;
+ wifi_status_cb = callback;
+
+ wifi_status_cb_devid = dev_id;
+
+ return 0;
+}
+
+int omap_wifi_status(int irq)
+{
+ return sdp4430_wifi_cd;
+}
+
+int sdp4430_wifi_set_carddetect(int val)
+{
+ printk(KERN_WARNING"%s: %d\n", __func__, val);
+ sdp4430_wifi_cd = val;
+ if (wifi_status_cb)
+ wifi_status_cb(val, wifi_status_cb_devid);
+ else
+ printk(KERN_WARNING "%s: Nobody to notify\n", __func__);
+ return 0;
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(sdp4430_wifi_set_carddetect);
+#endif
+
+static int sdp4430_wifi_power_state;
+
+int sdp4430_wifi_power(int on)
+{
+ printk(KERN_WARNING"%s: %d\n", __func__, on);
+ gpio_set_value(SDP4430_WIFI_PMENA_GPIO, on);
+ sdp4430_wifi_power_state = on;
+ return 0;
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(sdp4430_wifi_power);
+#endif
+
+static int sdp4430_wifi_reset_state;
+int sdp4430_wifi_reset(int on)
+{
+ printk(KERN_WARNING"%s: %d\n", __func__, on);
+ sdp4430_wifi_reset_state = on;
+ return 0;
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(sdp4430_wifi_reset);
+#endif
+
+struct wifi_platform_data sdp4430_wifi_control = {
+ .set_power = sdp4430_wifi_power,
+ .set_reset = sdp4430_wifi_reset,
+ .set_carddetect = sdp4430_wifi_set_carddetect,
+};
+
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+static struct resource sdp4430_wifi_resources[] = {
+ [0] = {
+ .name = "device_wifi_irq",
+ .start = OMAP_GPIO_IRQ(SDP4430_WIFI_IRQ_GPIO),
+ .end = OMAP_GPIO_IRQ(SDP4430_WIFI_IRQ_GPIO),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+ },
+};
+
+static struct platform_device sdp4430_wifi_device = {
+ .name = "device_wifi",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(sdp4430_wifi_resources),
+ .resource = sdp4430_wifi_resources,
+ .dev = {
+ .platform_data = &sdp4430_wifi_control,
+ },
+};
+#endif
+
+static int __init sdp4430_wifi_init(void)
+{
+ int ret;
+
+ printk(KERN_WARNING"%s: start\n", __func__);
+ ret = gpio_request(SDP4430_WIFI_IRQ_GPIO, "wifi_irq");
+ if (ret < 0) {
+ printk(KERN_ERR "%s: can't reserve GPIO: %d\n", __func__,
+ SDP4430_WIFI_IRQ_GPIO);
+ goto out;
+ }
+ gpio_direction_input(SDP4430_WIFI_IRQ_GPIO);
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+ ret = platform_device_register(&sdp4430_wifi_device);
+#endif
+out:
+ return ret;
+}
+
+device_initcall(sdp4430_wifi_init);
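
Note: this board file keeps a software "card detect" state for the WLAN device. An MMC host driver registers a notifier through omap_wifi_status_register(), and sdp4430_wifi_set_carddetect() flips the state and invokes that callback so the host rescans the slot. When CONFIG_WIFI_CONTROL_FUNC is disabled, the power/reset/card-detect helpers are exported for a WLAN driver to call directly; a sketch of that path, where the calling function is hypothetical and not part of this patch:

/* Illustrative only: bring the TI WLAN device up from a driver built
 * against the symbols this file exports when WIFI_CONTROL_FUNC=n. */
extern int sdp4430_wifi_power(int on);
extern int sdp4430_wifi_reset(int on);
extern int sdp4430_wifi_set_carddetect(int val);

static int example_wlan_bring_up(void)
{
        sdp4430_wifi_power(1);          /* drive WIFI_PMENA (GPIO 54) high */
        sdp4430_wifi_reset(0);          /* record that reset is released */
        sdp4430_wifi_set_carddetect(1); /* notify the MMC host to rescan */
        return 0;
}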
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 55a5e6329882..f27705bff190 100644..100755
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -36,11 +36,16 @@
#include <plat/usb.h>
#include <plat/syntm12xx.h>
#include <plat/keypad.h>
+#include <plat/display.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
#include <linux/i2c/twl.h>
#include <linux/regulator/machine.h>
#include "mmc-twl4030.h"
+#include <linux/delay.h>
+
+#include <plat/omap_device.h>
+#include <plat/omap_hwmod.h>
#define OMAP4_KBDOCP_BASE 0x4A31C000
@@ -202,14 +207,283 @@ static struct omap2_mcspi_device_config dummy2_mcspi_config = {
.single_channel = 0, /* 0: slave, 1: master */
};
#endif
+/* Display */
+static int sdp4430_panel_enable_lcd(struct omap_dss_device *dssdev) {
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
+
+ gpio_request(DSI2_GPIO_104, "dsi2_en_gpio");
+ gpio_direction_output(DSI2_GPIO_104, 0);
+ mdelay(500);
+ gpio_set_value(DSI2_GPIO_104, 1);
+ mdelay(500);
+ gpio_set_value(DSI2_GPIO_104, 0);
+ mdelay(500);
+ gpio_set_value(DSI2_GPIO_104, 1);
+
+ twl_i2c_write_u8(TWL_MODULE_PWM, 0xFF, PWM2ON); /*0xBD = 0xFF*/
+ twl_i2c_write_u8(TWL_MODULE_PWM, 0x7F, PWM2OFF); /*0xBE = 0x7F*/
+ twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x30, TOGGLE3);
+
+ gpio_request(DSI2_GPIO_59, "dsi2_bl_gpio");
+ gpio_direction_output(DSI2_GPIO_59, 1);
+ mdelay(120);
+ gpio_set_value(DSI2_GPIO_59, 0);
+ mdelay(120);
+ gpio_set_value(DSI2_GPIO_59, 1);
+
+ } else {
+ gpio_request(DSI1_GPIO_102, "dsi1_en_gpio");
+ gpio_direction_output(DSI1_GPIO_102, 0);
+ mdelay(500);
+ gpio_set_value(DSI1_GPIO_102, 1);
+ mdelay(500);
+ gpio_set_value(DSI1_GPIO_102, 0);
+ mdelay(500);
+ gpio_set_value(DSI1_GPIO_102, 1);
+
+ twl_i2c_write_u8(TWL_MODULE_PWM, 0xFF, PWM2ON); /*0xBD = 0xFF*/
+ twl_i2c_write_u8(TWL_MODULE_PWM, 0x7F, PWM2OFF); /*0xBE = 0x7F*/
+ twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x30, TOGGLE3);
+
+ gpio_request(DSI1_GPIO_27, "dsi1_bl_gpio");
+ gpio_direction_output(DSI1_GPIO_27, 1);
+ mdelay(120);
+ gpio_set_value(DSI1_GPIO_27, 0);
+ mdelay(120);
+ gpio_set_value(DSI1_GPIO_27, 1);
+
+ }
-static struct platform_device sdp4430_lcd_device = {
- .name = "sdp4430_lcd",
- .id = -1,
+ return 0;
+}
+
+static int sdp4430_panel_disable_lcd(struct omap_dss_device *dssdev) {
+
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
+ gpio_set_value(DSI2_GPIO_104, 1);
+ gpio_set_value(DSI2_GPIO_59, 0);
+ } else {
+ gpio_set_value(DSI1_GPIO_102, 1);
+ gpio_set_value(DSI1_GPIO_27, 0);
+ }
+ return 0;
+}
+
+static struct omap_device_pm_latency omap_dss_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ },
};
-static struct platform_device *sdp4430_devices[] __initdata = {
+static struct omap_dss_device sdp4430_lcd_device = {
+ .name = "lcd",
+ .driver_name = "panel-taal",
+ .type = OMAP_DISPLAY_TYPE_DSI,
+ .reset_gpio = 78,
+ .phy.dsi = {
+ .clk_lane = 1,
+ .clk_pol = 0,
+ .data1_lane = 2,
+ .data1_pol = 0,
+ .data2_lane = 3,
+ .data2_pol = 0,
+ .ext_te = true,
+ .ext_te_gpio = 101,
+ .div = {
+ .regm = 150,
+ .regn = 20,
+ .regm3 = 4,
+ .regm4 = 4,
+ .lck_div = 1,
+ .pck_div = 6,
+ .lp_clk_div = 6,
+ },
+ },
+ .platform_enable = sdp4430_panel_enable_lcd,
+ .platform_disable = sdp4430_panel_disable_lcd,
+ .channel = OMAP_DSS_CHANNEL_LCD,
+};
+
+static struct omap_dss_device sdp4430_lcd2_device = {
+ .name = "2lcd",
+ .driver_name = "panel-taal2",
+ .type = OMAP_DISPLAY_TYPE_DSI,
+ .reset_gpio = 78,
+ .phy.dsi = {
+ .clk_lane = 1,
+ .clk_pol = 0,
+ .data1_lane = 2,
+ .data1_pol = 0,
+ .data2_lane = 3,
+ .data2_pol = 0,
+ .ext_te = true,
+ .ext_te_gpio = 103,
+ .div = {
+ .regm = 150,
+ .regn = 20,
+ .regm3 = 4,
+ .regm4 = 4,
+ .lck_div = 1,
+ .pck_div = 6,
+ .lp_clk_div = 6,
+ },
+ },
+ .platform_enable = sdp4430_panel_enable_lcd,
+ .platform_disable = sdp4430_panel_disable_lcd,
+ .channel = OMAP_DSS_CHANNEL_LCD2,
+};
+
+static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
+{
+ gpio_request(HDMI_GPIO_60 , "hdmi_gpio_60");
+ gpio_request(HDMI_GPIO_41 , "hdmi_gpio_41");
+ gpio_direction_output(HDMI_GPIO_60, 0);
+ gpio_direction_output(HDMI_GPIO_41, 0);
+ gpio_set_value(HDMI_GPIO_60, 1);
+ gpio_set_value(HDMI_GPIO_41, 1);
+ gpio_set_value(HDMI_GPIO_60, 0);
+
+ return 0;
+}
+
+static int sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev)
+{
+ gpio_set_value(HDMI_GPIO_60, 1);
+ gpio_set_value(HDMI_GPIO_41, 1);
+
+ return 0;
+}
+static void __init sdp4430_hdmi_init(void)
+{
+ return;
+}
+
+static struct omap_dss_device sdp4430_hdmi_device = {
+ .name = "hdmi",
+ .driver_name = "hdmi_panel",
+ .type = OMAP_DISPLAY_TYPE_HDMI,
+ .phy.dpi.data_lines = 24,
+ .platform_enable = sdp4430_panel_enable_hdmi,
+ .platform_disable = sdp4430_panel_disable_hdmi,
+};
+
+static int sdp4430_panel_enable_pico_DLP(struct omap_dss_device *dssdev)
+{
+ int i = 0;
+ gpio_request(DLP_4430_GPIO_59, "DLP DISPLAY SEL");
+ gpio_direction_output(DLP_4430_GPIO_59, 0);
+ gpio_request(DLP_4430_GPIO_45, "DLP PARK");
+ gpio_direction_output(DLP_4430_GPIO_45, 0);
+ gpio_request(DLP_4430_GPIO_40, "DLP PHY RESET");
+ gpio_direction_output(DLP_4430_GPIO_40, 0);
+ gpio_request(DLP_4430_GPIO_44, "DLP READY RESET");
+ gpio_direction_input(DLP_4430_GPIO_44);
+ mdelay(500);
+
+ gpio_set_value(DLP_4430_GPIO_59, 1);
+ gpio_set_value(DLP_4430_GPIO_45, 1);
+ mdelay(1000);
+
+ gpio_set_value(DLP_4430_GPIO_40, 1);
+ mdelay(1000);
+
+ /* FIXME: with the MLO gpio changes, the gpio read is not returning the correct value even though it is set in hardware, so the check is commented out until the problem is fixed */
+ /*while(i == 0){
+ i=gpio_get_value(DLP_4430_GPIO_44);
+ printk("wait for ready bit %d\n",i);
+ }*/
+ printk("%d ready bit ", i);
+ mdelay(2000);
+ return 0;
+}
+
+static int sdp4430_panel_disable_pico_DLP(struct omap_dss_device *dssdev)
+{
+ gpio_set_value(DLP_4430_GPIO_40, 0);
+ gpio_set_value(DLP_4430_GPIO_45, 0);
+
+ return 0;
+}
+
+static struct omap_dss_device sdp4430_picoDLP_device = {
+ .name = "pico_DLP",
+ .driver_name = "picoDLP_panel",
+ .type = OMAP_DISPLAY_TYPE_DPI,
+ .phy.dpi.data_lines = 24,
+ .platform_enable = sdp4430_panel_enable_pico_DLP,
+ .platform_disable = sdp4430_panel_disable_pico_DLP,
+ .channel = OMAP_DSS_CHANNEL_LCD2,
+};
+
+
+
+static struct omap_dss_device *sdp4430_dss_devices[] = {
&sdp4430_lcd_device,
+ &sdp4430_lcd2_device,
+#ifdef CONFIG_OMAP2_DSS_HDMI
+ &sdp4430_hdmi_device,
+#endif
+#ifdef CONFIG_PANEL_PICO_DLP
+ &sdp4430_picoDLP_device,
+#endif
+};
+
+static struct omap_dss_board_info sdp4430_dss_data = {
+ .num_devices = ARRAY_SIZE(sdp4430_dss_devices),
+ .devices = sdp4430_dss_devices,
+ .default_device = &sdp4430_lcd_device,
+};
+
+static struct platform_device sdp4430_dss_device = {
+ .name = "omapdss",
+ .id = -1,
+ .dev = {
+ .platform_data = &sdp4430_dss_data,
+ },
+};
+
+#define MAX_OMAP_DSS_HWMOD_NAME_LEN 16
+static const char name[] = "omapdss";
+struct omap_device *od;
+
+static void __init sdp4430_display_init(void) {
+ struct omap_hwmod *oh;
+ char oh_name[MAX_OMAP_DSS_HWMOD_NAME_LEN];
+ int l, idx;
+ struct omap_dss_platform_data *pdata;
+ int bus_id = 1;
+ idx = 1;
+
+ l = snprintf(oh_name, MAX_OMAP_DSS_HWMOD_NAME_LEN,
+ "dss");
+ WARN(l >= MAX_OMAP_DSS_HWMOD_NAME_LEN,
+ "String buffer overflow in DSS device setup\n");
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("Could not look up %s\n", oh_name);
+ return;
+ }
+
+ od = omap_device_build(name, -1, oh, &sdp4430_dss_data,
+ sizeof(struct omap_dss_board_info),
+ omap_dss_latency,
+ ARRAY_SIZE(omap_dss_latency), 0);
+
+ WARN(IS_ERR(od), "Could not build omap_device for %s %s\n",
+ name, oh_name);
+
+ return;
+}
+
+/* end Display */
+
+static struct regulator_consumer_supply sdp4430_vdda_dac_supply = {
+ .supply = "vdda_dac",
+ .dev = &sdp4430_dss_device.dev,
+};
+static struct platform_device *sdp4430_devices[] __initdata = {
&omap_kp_device,
};
@@ -286,6 +560,13 @@ static int __init sdp4430_mmc_init(void)
/* TODO: Fix Hard Coding */
mmc[0].gpio_cd = 384 ;
+#ifdef CONFIG_TIWLAN_SDIO
+ /* The controller that is connected to the 128x device
+ should have the card detect gpio disabled. This is
+ achieved by initializing it with a negative value */
+ mmc[CONFIG_TIWLAN_MMC_CONTROLLER - 1].gpio_cd = -EINVAL;
+#endif
+
twl4030_mmc_init(mmc);
/* link regulators to MMC adapters ... we "know" the
* regulators will be set up only *after* we return.
@@ -462,6 +743,35 @@ static struct regulator_init_data sdp4430_vdac = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &sdp4430_vdda_dac_supply,
+};
+
+/* VPLL2 for digital video outputs */
+static struct regulator_consumer_supply sdp4430_vpll2_supplies[] = {
+ {
+ .supply = "vdvi",
+ .dev = &sdp4430_lcd_device.dev,
+ },
+ {
+ .supply = "vdds_dsi",
+ .dev = &sdp4430_dss_device.dev,
+ }
+};
+
+static struct regulator_init_data sdp4430_vpll2 = {
+ .constraints = {
+ .name = "VDVI",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(sdp4430_vpll2_supplies),
+ .consumer_supplies = sdp4430_vpll2_supplies,
};
static struct regulator_init_data sdp4430_vusb = {
@@ -518,6 +828,12 @@ static struct twl4030_platform_data sdp4430_twldata = {
.codec = &twl6040_codec,
};
+static struct pico_platform_data picodlp_platform_data[] = {
+ [0] = { /* DLP Controller */
+ .gpio_intr = 40,
+ },
+};
+
static struct i2c_board_info __initdata sdp4430_i2c_boardinfo[] = {
{
I2C_BOARD_INFO("twl6030", 0x48),
@@ -532,6 +848,10 @@ static struct i2c_board_info __initdata sdp4430_i2c_2_boardinfo[] = {
I2C_BOARD_INFO("tm12xx_ts_primary", 0x4b),
.platform_data = &tm12xx_platform_data[0],
},
+ {
+ I2C_BOARD_INFO("picoDLP_i2c_driver", 0x1b),
+ .platform_data = &picodlp_platform_data[0],
+ },
};
static struct i2c_board_info __initdata sdp4430_i2c_3_boardinfo[] = {
@@ -593,6 +913,37 @@ static void omap_ethernet_init(void)
gpio_direction_input(34);
}
+#ifdef CONFIG_TIWLAN_SDIO
+static void pad_config(unsigned long pad_addr, u32 andmask, u32 ormask)
+{
+ int val;
+ u32 *addr;
+
+ addr = (u32 *) ioremap(pad_addr, 4);
+ if (!addr) {
+ printk(KERN_ERR"OMAP_pad_config: ioremap failed with addr %lx\n",
+ pad_addr);
+ return;
+ }
+
+ val = __raw_readl(addr);
+ val &= andmask;
+ val |= ormask;
+ __raw_writel(val, addr);
+
+ iounmap(addr);
+}
+
+void wlan_1283_config(void)
+{
+ pad_config(0x4A100078, 0xFFECFFFF, 0x00030000);
+ pad_config(0x4A10007C, 0xFFFFFFEF, 0x0000000B);
+ if (gpio_request(54, NULL) != 0)
+ printk(KERN_ERR "GPIO 54 request failed\n");
+ gpio_direction_output(54, 0);
+ return;
+}
+#endif
static void __init omap_4430sdp_init(void)
{
omap4_i2c_init();
@@ -600,13 +951,17 @@ static void __init omap_4430sdp_init(void)
omap_serial_init();
/* OMAP4 SDP uses internal transceiver so register nop transceiver */
sdp4430_mmc_init();
+
+#ifdef CONFIG_TIWLAN_SDIO
+ wlan_1283_config();
+#endif
usb_nop_xceiv_register();
usb_musb_init(&musb_board_data);
omap_ethernet_init();
sdp4430_spi_board_info[0].irq = gpio_to_irq(34);
spi_register_board_info(sdp4430_spi_board_info,
ARRAY_SIZE(sdp4430_spi_board_info));
-
+ sdp4430_display_init();
}
static void __init omap_4430sdp_map_io(void)
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 0d1bddff4d20..8a9adcd24eb4 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -27,10 +27,10 @@
#define OMAP4_STATE_C2 1 /* C2 - CPU0 CSWR + CPU1 OFF + MPU CSWR + Core active */
#define OMAP4_STATE_C3 2 /* C3 - CPU0 CSWR + CPU1 OFF + MPU CSWR + Core CSWR */
-extern int (*_omap_sram_idle)(void);
-
#define wfi() \
{ \
+ isb(); \
+ wmb(); \
__asm__ __volatile__ ("wfi"); \
}
@@ -77,7 +77,7 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
{
struct omap4_processor_cx *cx = cpuidle_get_statedata(state);
struct timespec ts_preidle, ts_postidle, ts_idle;
- u32 scu_pwr_st;
+ u32 scu_pwr_st, cpu1_state;
/* Used to keep track of the total time in idle */
getnstimeofday(&ts_preidle);
@@ -88,8 +88,10 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
goto return_sleep_time;
}
- /* Do only a wfi as long as any other core is active */
- if (num_online_cpus() > 1) {
+ /* Make sure cpu1 is offlined before cpu0 idles */
+ cpu1_state = pwrdm_read_pwrst(cpu1_pd);
+ /* Do only a wfi as long as CPU1 is not in RET/OFF */
+ if (cpu1_state > PWRDM_POWER_RET) {
wfi();
goto return_sleep_time;
}
@@ -111,10 +113,7 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
}
pwrdm_set_next_pwrst(core_pd, cx->core_state);
- if (_omap_sram_idle)
- _omap_sram_idle();
- else
- wfi();
+ wfi();
if (cx->core_state < PWRDM_POWER_ON) {
omap2_gpio_resume_after_retention();
diff --git a/arch/arm/mach-omap2/dmtimers.c b/arch/arm/mach-omap2/dmtimers.c
index e5f8db1cbf1b..509b8cb37802 100644
--- a/arch/arm/mach-omap2/dmtimers.c
+++ b/arch/arm/mach-omap2/dmtimers.c
@@ -248,6 +248,12 @@ void __init omap2_dm_timer_early_init(void)
pdata = kzalloc(sizeof(struct omap_dm_timer_plat_info),
GFP_KERNEL);
+ if (!pdata) {
+ WARN("gptimer%d :Memory allocation failed\n"
+ , i+1);
+ return;
+ }
+
pdata->omap_dm_clk_enable = omap2_dm_timer_enable;
pdata->omap_dm_clk_disable = omap2_dm_timer_disable;
pdata->omap_dm_set_source_clk = omap2_dm_timer_set_clk;
@@ -397,6 +403,12 @@ fail:
pdata = kzalloc(sizeof(struct omap_dm_timer_plat_info),
GFP_KERNEL);
+ if (!pdata) {
+ WARN("gptimer%d :Memory allocation failed\n"
+ , i+1);
+ return;
+ }
+
pdata->omap_dm_clk_enable = omap2_dm_timer_enable;
pdata->omap_dm_clk_disable = omap2_dm_timer_disable;
pdata->omap_dm_set_source_clk = omap2_dm_timer_set_clk;
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
index 5cff439a9869..7dc8e0ce92c5 100644
--- a/arch/arm/mach-omap2/gpio.c
+++ b/arch/arm/mach-omap2/gpio.c
@@ -1099,12 +1099,14 @@ void omap_gpio_restore_context(void)
static int __devexit omap_gpio_remove(struct platform_device *pdev)
{
- struct omap_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_gpio_platform_data *pdata;
struct gpio_bank *bank;
int id;
- if (!pdev || !pdata)
- return 0;
+ if (!pdev || !pdev->dev.platform_data)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
id = pdev->id;
if (id > gpio_bank_count)
@@ -1120,16 +1122,17 @@ static int __devexit omap_gpio_remove(struct platform_device *pdev)
static int __devinit omap_gpio_probe(struct platform_device *pdev)
{
static int show_rev_once;
- struct omap_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_gpio_platform_data *pdata;
struct gpio_bank *bank;
int id, i;
- if (!pdev || !pdata) {
+ if (!pdev || !pdev->dev.platform_data) {
pr_err("GPIO device initialize without"
"platform data\n");
return -EINVAL;
}
+ pdata = pdev->dev.platform_data;
gpio_bank_count = OMAP_NR_GPIOS;
#ifdef CONFIG_ARCH_OMAP2
if (cpu_is_omap242x())
@@ -1137,7 +1140,7 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
#endif
id = pdev->id;
- if (id > gpio_bank_count) {
+ if (id >= gpio_bank_count) {
pr_err("Invalid GPIO device id (%d)\n", id);
return -EINVAL;
}
@@ -1266,6 +1269,11 @@ void __init omap_gpio_early_init(void)
pdata = kzalloc(sizeof(struct omap_gpio_platform_data),
GFP_KERNEL);
+ if (!pdata) {
+ WARN("Memory allocation failed gpio%d \n", i + 1);
+ return;
+ }
+
pdata->base = oh->_rt_va;
pdata->irq = oh->mpu_irqs[0].irq;
pdata->virtual_irq_start = IH_GPIO_BASE + 32 * i;
@@ -1306,11 +1314,16 @@ int __init omap_init_gpio(void)
oh = omap_hwmod_lookup(oh_name);
if (!oh) {
pr_err("Could not look up %s\n", oh_name);
+ i++;
continue;
}
pdata = kzalloc(sizeof(struct omap_gpio_platform_data),
GFP_KERNEL);
+ if (!pdata) {
+ WARN("Memory allocation failed gpio%d \n", i + 1);
+ return;
+ }
pdata->base = oh->_rt_va;
pdata->irq = oh->mpu_irqs[0].irq;
pdata->virtual_irq_start = IH_GPIO_BASE + 32 * i;
diff --git a/arch/arm/mach-omap2/include/mach/dmm.h b/arch/arm/mach-omap2/include/mach/dmm.h
new file mode 100644
index 000000000000..a3adddbf9fd6
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/dmm.h
@@ -0,0 +1,128 @@
+/*
+ * dmm.h
+ *
+ * DMM driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DMM_H
+#define DMM_H
+
+#define DMM_BASE 0x4E000000
+#define DMM_SIZE 0x800
+
+#define DMM_REVISION 0x000
+#define DMM_HWINFO 0x004
+#define DMM_LISA_HWINFO 0x008
+#define DMM_DMM_SYSCONFIG 0x010
+#define DMM_LISA_LOCK 0x01C
+#define DMM_LISA_MAP__0 0x040
+#define DMM_LISA_MAP__1 0x044
+#define DMM_TILER_HWINFO 0x208
+#define DMM_TILER_OR__0 0x220
+#define DMM_TILER_OR__1 0x224
+#define DMM_PAT_HWINFO 0x408
+#define DMM_PAT_GEOMETRY 0x40C
+#define DMM_PAT_CONFIG 0x410
+#define DMM_PAT_VIEW__0 0x420
+#define DMM_PAT_VIEW__1 0x424
+#define DMM_PAT_VIEW_MAP__0 0x440
+#define DMM_PAT_VIEW_MAP_BASE 0x460
+#define DMM_PAT_IRQ_EOI 0x478
+#define DMM_PAT_IRQSTATUS_RAW 0x480
+#define DMM_PAT_IRQSTATUS 0x490
+#define DMM_PAT_IRQENABLE_SET 0x4A0
+#define DMM_PAT_IRQENABLE_CLR 0x4B0
+#define DMM_PAT_STATUS__0 0x4C0
+#define DMM_PAT_STATUS__1 0x4C4
+#define DMM_PAT_STATUS__2 0x4C8
+#define DMM_PAT_STATUS__3 0x4CC
+#define DMM_PAT_DESCR__0 0x500
+#define DMM_PAT_AREA__0 0x504
+#define DMM_PAT_CTRL__0 0x508
+#define DMM_PAT_DATA__0 0x50C
+#define DMM_PEG_HWINFO 0x608
+#define DMM_PEG_PRIO 0x620
+#define DMM_PEG_PRIO_PAT 0x640
+
+/**
+ * PAT refill programming mode.
+ */
+enum pat_mode {
+ MANUAL,
+ AUTO
+};
+
+/**
+ * Area definition for DMM physical address translator.
+ */
+struct pat_area {
+ s32 x0:8;
+ s32 y0:8;
+ s32 x1:8;
+ s32 y1:8;
+};
+
+/**
+ * DMM physical address translator control.
+ */
+struct pat_ctrl {
+ s32 start:4;
+ s32 dir:4;
+ s32 lut_id:8;
+ s32 sync:12;
+ s32 ini:4;
+};
+
+/**
+ * PAT descriptor.
+ */
+struct pat {
+ struct pat *next;
+ struct pat_area area;
+ struct pat_ctrl ctrl;
+ u32 data;
+};
+
+/**
+ * Program the physical address translator.
+ * @param desc
+ * @param mode
+ * @return an error status.
+ */
+s32 dmm_pat_refill(struct pat *desc, enum pat_mode mode);
+
+/**
+ * Request a page from the DMM free page stack.
+ * @return a physical page address.
+ */
+u32 dmm_get_page(void);
+
+/**
+ * Return a used page to the DMM free page stack.
+ * @param page_addr a physical page address.
+ */
+void dmm_free_page(u32 page_addr);
+
+/**
+ * Request a set of pages from the DMM free page stack.
+ * @return a pointer to a list of physical page addresses.
+ */
+u32 *dmm_get_pages(s32 n);
+
+/**
+ * Return a set of used pages to the DMM free page stack.
+ * @param list a pointer to a list of physical page addresses.
+ */
+void dmm_free_pages(u32 *list);
+
+#endif
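
For orientation, the sketch below shows how a kernel client might drive the PAT interface declared in this header. It is only an illustration, assuming the DMM driver exports dmm_get_page(), dmm_free_page() and dmm_pat_refill() as declared above; the area coordinates and the zero-page check are placeholders, not part of the actual driver.

	/* Hypothetical client of the DMM PAT API declared in mach/dmm.h. */
	#include <linux/errno.h>
	#include <mach/dmm.h>

	static s32 example_map_one_page(void)
	{
		struct pat desc = {
			.next = NULL,
			/* refill a 2x2 tile region starting at the LUT origin */
			.area = { .x0 = 0, .y0 = 0, .x1 = 1, .y1 = 1 },
			.ctrl = { .start = 1, .lut_id = 0 },
		};
		s32 ret;

		desc.data = dmm_get_page();	/* physical page backing the area */
		if (!desc.data)
			return -ENOMEM;

		ret = dmm_pat_refill(&desc, MANUAL);
		if (ret)
			dmm_free_page(desc.data);
		return ret;
	}
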
diff --git a/arch/arm/mach-omap2/include/mach/tiler.h b/arch/arm/mach-omap2/include/mach/tiler.h
new file mode 100644
index 000000000000..b8adf1439b6f
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/tiler.h
@@ -0,0 +1,116 @@
+/*
+ * tiler.h
+ *
+ * TILER driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef TILER_H
+#define TILER_H
+
+#define TILER_PAGE 0x1000
+#define TILER_WIDTH 256
+#define TILER_HEIGHT 128
+#define TILER_BLOCK_WIDTH 64
+#define TILER_BLOCK_HEIGHT 64
+#define TILER_LENGTH (TILER_WIDTH * TILER_HEIGHT * TILER_PAGE)
+
+#define TILER_MAX_NUM_BLOCKS 16
+
+#define TILIOC_GBUF _IOWR('z', 100, u32)
+#define TILIOC_FBUF _IOWR('z', 101, u32)
+#define TILIOC_GSSP _IOWR('z', 102, u32)
+#define TILIOC_MBUF _IOWR('z', 103, u32)
+#define TILIOC_UMBUF _IOWR('z', 104, u32)
+#define TILIOC_QBUF _IOWR('z', 105, u32)
+#define TILIOC_RBUF _IOWR('z', 106, u32)
+#define TILIOC_URBUF _IOWR('z', 107, u32)
+#define TILIOC_QUERY_BLK _IOWR('z', 108, u32)
+
+enum tiler_fmt {
+ TILFMT_MIN = -1,
+ TILFMT_INVALID = -1,
+ TILFMT_NONE = 0,
+ TILFMT_8BIT = 1,
+ TILFMT_16BIT = 2,
+ TILFMT_32BIT = 3,
+ TILFMT_PAGE = 4,
+ TILFMT_MAX = 4
+};
+
+struct area {
+ u16 width;
+ u16 height;
+};
+
+struct tiler_block_info {
+ enum tiler_fmt fmt;
+ union {
+ struct area area;
+ u32 len;
+ } dim;
+ u32 stride;
+ void *ptr;
+ u32 ssptr;
+};
+
+struct tiler_buf_info {
+ s32 num_blocks;
+ struct tiler_block_info blocks[TILER_MAX_NUM_BLOCKS];
+ s32 offset;
+};
+
+struct tiler_view_orient {
+ u8 rotate_90;
+ u8 x_invert;
+ u8 y_invert;
+};
+
+/**
+ * Request a 1-D or 2-D TILER buffer.
+ *
+ * @param fmt TILER bit mode.
+ * @param width buffer width.
+ * @param height buffer height.
+ * @param sys_addr system space (L3) address.
+ *
+ * @return an error status.
+ */
+s32 tiler_alloc(enum tiler_fmt fmt, u32 width, u32 height, u32 *sys_addr);
+
+/**
+ * Free TILER memory.
+ * @param sys_addr system space (L3) address.
+ * @return an error status.
+ */
+s32 tiler_free(u32 sys_addr);
+
+u32 tiler_reorient_addr(u32 tsptr, struct tiler_view_orient orient);
+
+u32 tiler_get_natural_addr(void *sys_ptr);
+
+u32 tiler_reorient_topleft(u32 tsptr, struct tiler_view_orient orient,
+ u32 width, u32 height);
+
+u32 tiler_stride(u32 tsptr);
+
+void tiler_rotate_view(struct tiler_view_orient *orient, u32 rotation);
+
+void tiler_alloc_packed(s32 *count, enum tiler_fmt fmt, u32 width, u32 height,
+ void **sysptr, void **allocptr, s32 aligned);
+
+void tiler_alloc_packed_nv12(s32 *count, u32 width, u32 height, void **y_sysptr,
+ void **uv_sysptr, void **y_allocptr,
+ void **uv_allocptr, s32 aligned);
+
+#endif
+
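
Similarly, a minimal usage sketch of the TILER allocation entry points above, assuming the TILER driver is built in and these symbols are exported; the 16-bit 640x480 geometry is an arbitrary example, not a required configuration.

	/* Hypothetical TILER client: allocate, report and free a 2-D buffer. */
	#include <linux/kernel.h>
	#include <mach/tiler.h>

	static s32 example_tiler_buffer(void)
	{
		u32 sys_addr = 0;
		s32 ret;

		/* 2-D container, 16 bits per element, 640x480 elements */
		ret = tiler_alloc(TILFMT_16BIT, 640, 480, &sys_addr);
		if (ret)
			return ret;

		pr_info("tiler buffer at L3 address 0x%08x\n", sys_addr);

		return tiler_free(sys_addr);
	}
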
diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c
index 6f4b7cc8f4d1..2735bd769890 100644
--- a/arch/arm/mach-omap2/iommu2.c
+++ b/arch/arm/mach-omap2/iommu2.c
@@ -146,6 +146,8 @@ static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
printk("\n");
iommu_write_reg(obj, stat, MMU_IRQSTATUS);
+ /* Disable MMU to stop continuous generation of MMU faults */
+ omap2_iommu_disable(obj);
return stat;
}
@@ -183,7 +185,7 @@ static struct cr_regs *omap2_alloc_cr(struct iommu *obj, struct iotlb_entry *e)
if (!cr)
return ERR_PTR(-ENOMEM);
- cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz;
+ cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
cr->ram = e->pa | e->endian | e->elsz | e->mixed;
return cr;
@@ -211,7 +213,8 @@ static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
char *p = buf;
/* FIXME: Need more detail analysis of cam/ram */
- p += sprintf(p, "%08x %08x\n", cr->cam, cr->ram);
+ p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
+ (cr->cam & MMU_CAM_P) ? 1 : 0);
return p - buf;
}
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 281ab6342448..55ea58e74f94 100644..100755
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -40,6 +40,10 @@
#define AUTOIDLE (1 << 0)
#define SOFTRESET (1 << 1)
#define SMARTIDLE (2 << 3)
+#define OMAP4_SOFTRESET (1 << 0)
+#define OMAP4_SMARTIDLE (2 << 2)
+#define OMAP4_NOIDLE (1 << 2)
+
/* SYSSTATUS: register bit definition */
#define RESETDONE (1 << 0)
@@ -91,32 +95,54 @@ static int omap2_mbox_startup(struct omap_mbox *mbox)
u32 l;
unsigned long timeout;
- mbox_ick_handle = clk_get(NULL, "mailboxes_ick");
- if (IS_ERR(mbox_ick_handle)) {
- printk(KERN_ERR "Could not get mailboxes_ick: %d\n",
- PTR_ERR(mbox_ick_handle));
- return PTR_ERR(mbox_ick_handle);
+ if (!cpu_is_omap44xx()) {
+ mbox_ick_handle = clk_get(NULL, "mailboxes_ick");
+ if (IS_ERR(mbox_ick_handle)) {
+ printk(KERN_ERR "Could not get mailboxes_ick: %ld\n",
+ PTR_ERR(mbox_ick_handle));
+ return PTR_ERR(mbox_ick_handle);
+ }
+ clk_enable(mbox_ick_handle);
}
- clk_enable(mbox_ick_handle);
-
- mbox_write_reg(SOFTRESET, MAILBOX_SYSCONFIG);
- timeout = jiffies + msecs_to_jiffies(20);
- do {
- l = mbox_read_reg(MAILBOX_SYSSTATUS);
- if (l & RESETDONE)
- break;
- } while (!time_after(jiffies, timeout));
-
- if (!(l & RESETDONE)) {
- pr_err("Can't take mmu out of reset\n");
- return -ENODEV;
+
+ if (cpu_is_omap44xx()) {
+ mbox_write_reg(OMAP4_SOFTRESET, MAILBOX_SYSCONFIG);
+ timeout = jiffies + msecs_to_jiffies(20);
+ do {
+ l = mbox_read_reg(MAILBOX_SYSCONFIG);
+ if (!(l & OMAP4_SOFTRESET))
+ break;
+ } while (!time_after(jiffies, timeout));
+
+ if (l & OMAP4_SOFTRESET) {
+ pr_err("Can't take mailbox out of reset\n");
+ return -ENODEV;
+ }
+ } else {
+ mbox_write_reg(SOFTRESET, MAILBOX_SYSCONFIG);
+ timeout = jiffies + msecs_to_jiffies(20);
+ do {
+ l = mbox_read_reg(MAILBOX_SYSSTATUS);
+ if (l & RESETDONE)
+ break;
+ } while (!time_after(jiffies, timeout));
+
+ if (!(l & RESETDONE)) {
+ pr_err("Can't take mailbox out of reset\n");
+ return -ENODEV;
+ }
}
l = mbox_read_reg(MAILBOX_REVISION);
pr_info("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f));
- l = SMARTIDLE | AUTOIDLE;
- mbox_write_reg(l, MAILBOX_SYSCONFIG);
+ if (cpu_is_omap44xx()) {
+ l = OMAP4_NOIDLE; /* TODO: Change to SMARTIDLE */
+ mbox_write_reg(l, MAILBOX_SYSCONFIG);
+ } else {
+ l = SMARTIDLE | AUTOIDLE;
+ mbox_write_reg(l, MAILBOX_SYSCONFIG);
+ }
omap2_mbox_enable_irq(mbox, IRQ_RX);
@@ -125,9 +151,11 @@ static int omap2_mbox_startup(struct omap_mbox *mbox)
static void omap2_mbox_shutdown(struct omap_mbox *mbox)
{
- clk_disable(mbox_ick_handle);
- clk_put(mbox_ick_handle);
- mbox_ick_handle = NULL;
+ if (!cpu_is_omap44xx()) {
+ clk_disable(mbox_ick_handle);
+ clk_put(mbox_ick_handle);
+ mbox_ick_handle = NULL;
+ }
}
/* Mailbox FIFO handle functions */
@@ -419,8 +447,10 @@ static int __devinit omap2_mbox_probe(struct platform_device *pdev)
#endif
return 0;
+#if defined(CONFIG_ARCH_OMAP2420) /* IVA */
err_iva1:
omap_mbox_unregister(&mbox_dsp_info);
+#endif
err_dsp:
iounmap(mbox_base);
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index baa451733850..524bb7dc9769 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <mach/irqs.h>
@@ -22,6 +23,21 @@
#include <plat/mux.h>
#include <plat/cpu.h>
#include <plat/mcbsp.h>
+#include <plat/dma.h>
+
+#define OMAP_MCBSP_READ(base, reg) \
+ omap_mcbsp_read(base, OMAP_MCBSP_REG_##reg)
+#define OMAP_MCBSP_WRITE(base, reg, val) \
+ omap_mcbsp_write(base, OMAP_MCBSP_REG_##reg, val)
+
+struct omap_mcbsp_reg_cfg mcbsp_cfg = {0};
+
+struct mcbsp_internal_clk {
+ struct clk clk;
+ struct clk **childs;
+ int n_childs;
+};
+
static void omap2_mcbsp2_mux_setup(void)
{
@@ -209,8 +225,830 @@ static struct omap_mcbsp_platform_data omap44xx_mcbsp_pdata[] = {
};
#define OMAP44XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap44xx_mcbsp_pdata)
+static void omap2_mcbsp_free(unsigned int id)
+{
+ struct omap_mcbsp *mcbsp = mcbsp_ptr[id];
+
+ if (!cpu_is_omap2420()) {
+ if (mcbsp->dma_rx_lch != -1) {
+ omap_free_dma_chain(mcbsp->dma_rx_lch);
+ mcbsp->dma_rx_lch = -1;
+ }
+
+ if (mcbsp->dma_tx_lch != -1) {
+ omap_free_dma_chain(mcbsp->dma_tx_lch);
+ mcbsp->dma_tx_lch = -1;
+ }
+ }
+ return;
+}
+void omap2_mcbsp_config(unsigned int id,
+ const struct omap_mcbsp_reg_cfg *config)
+{
+ struct omap_mcbsp *mcbsp;
+ void __iomem *io_base;
+ mcbsp = id_to_mcbsp_ptr(id);
+ io_base = mcbsp->io_base;
+ OMAP_MCBSP_WRITE(io_base, XCCR, config->xccr);
+ OMAP_MCBSP_WRITE(io_base, RCCR, config->rccr);
+}
+
+static void omap2_mcbsp_rx_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct omap_mcbsp *mcbsp_dma_rx = data;
+ void __iomem *io_base;
+ io_base = mcbsp_dma_rx->io_base;
+
+	/* If we are at the last transfer, shut down the receiver */
+ if ((mcbsp_dma_rx->auto_reset & OMAP_MCBSP_AUTO_RRST)
+ && (omap_dma_chain_status(mcbsp_dma_rx->dma_rx_lch) ==
+ OMAP_DMA_CHAIN_INACTIVE))
+ OMAP_MCBSP_WRITE(io_base, SPCR1,
+ OMAP_MCBSP_READ(io_base, SPCR1) & (~RRST));
+
+ if (mcbsp_dma_rx->rx_callback != NULL)
+ mcbsp_dma_rx->rx_callback(ch_status, mcbsp_dma_rx->rx_cb_arg);
+
+}
+
+static void omap2_mcbsp_tx_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct omap_mcbsp *mcbsp_dma_tx = data;
+ void __iomem *io_base;
+ io_base = mcbsp_dma_tx->io_base;
+
+	/* If we are at the last transfer, shut down the transmitter */
+ if ((mcbsp_dma_tx->auto_reset & OMAP_MCBSP_AUTO_XRST)
+ && (omap_dma_chain_status(mcbsp_dma_tx->dma_tx_lch) ==
+ OMAP_DMA_CHAIN_INACTIVE))
+ OMAP_MCBSP_WRITE(io_base, SPCR2,
+ OMAP_MCBSP_READ(io_base, SPCR2) & (~XRST));
+
+ if (mcbsp_dma_tx->tx_callback != NULL)
+ mcbsp_dma_tx->tx_callback(ch_status, mcbsp_dma_tx->tx_cb_arg);
+}
+
+/*
+ * Enable/Disable the sample rate generator
+ * id : McBSP interface ID
+ * state : Enable/Disable
+ */
+void omap2_mcbsp_set_srg_fsg(unsigned int id, u8 state)
+{
+ struct omap_mcbsp *mcbsp = mcbsp_ptr[id];
+ void __iomem *io_base;
+
+ io_base = mcbsp->io_base;
+
+ if (state == OMAP_MCBSP_DISABLE_FSG_SRG) {
+ OMAP_MCBSP_WRITE(io_base, SPCR2,
+ OMAP_MCBSP_READ(io_base, SPCR2) & (~GRST));
+ OMAP_MCBSP_WRITE(io_base, SPCR2,
+ OMAP_MCBSP_READ(io_base, SPCR2) & (~FRST));
+ } else {
+ OMAP_MCBSP_WRITE(io_base, SPCR2,
+ OMAP_MCBSP_READ(io_base, SPCR2) | GRST);
+ OMAP_MCBSP_WRITE(io_base, SPCR2,
+ OMAP_MCBSP_READ(io_base, SPCR2) | FRST);
+ }
+ return;
+}
+
+/*
+ * Stop transmitting data on a McBSP interface
+ * id : McBSP interface ID
+ */
+int omap2_mcbsp_stop_datatx(unsigned int id)
+{
+ struct omap_mcbsp *mcbsp = mcbsp_ptr[id];
+ void __iomem *io_base;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+
+ io_base = mcbsp->io_base;
+
+ if (mcbsp->dma_tx_lch != -1) {
+ if (omap_stop_dma_chain_transfers(mcbsp->dma_tx_lch) != 0)
+ return -EINVAL;
+ }
+ mcbsp->tx_dma_chain_state = 0;
+ OMAP_MCBSP_WRITE(io_base, SPCR2,
+ OMAP_MCBSP_READ(io_base, SPCR2) & (~XRST));
+
+ if (!mcbsp->rx_dma_chain_state)
+ omap2_mcbsp_set_srg_fsg(id, OMAP_MCBSP_DISABLE_FSG_SRG);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap2_mcbsp_stop_datatx);
+
+/*
+ * Stop receiving data on a McBSP interface
+ * id : McBSP interface ID
+ */
+int omap2_mcbsp_stop_datarx(u32 id)
+{
+ struct omap_mcbsp *mcbsp = mcbsp_ptr[id];
+ void __iomem *io_base;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+
+ io_base = mcbsp->io_base;
+
+ if (mcbsp->dma_rx_lch != -1) {
+ if (omap_stop_dma_chain_transfers(mcbsp->dma_rx_lch) != 0)
+ return -EINVAL;
+ }
+ OMAP_MCBSP_WRITE(io_base, SPCR1,
+ OMAP_MCBSP_READ(io_base, SPCR1) & (~RRST));
+
+ mcbsp->rx_dma_chain_state = 0;
+ if (!mcbsp->tx_dma_chain_state)
+ omap2_mcbsp_set_srg_fsg(id, OMAP_MCBSP_DISABLE_FSG_SRG);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap2_mcbsp_stop_datarx);
+
+/*
+ * Get the element index and frame index of transmitter
+ * id : McBSP interface ID
+ * ei : element index
+ * fi : frame index
+ */
+int omap2_mcbsp_transmitter_index(int id, int *ei, int *fi)
+{
+ struct omap_mcbsp *mcbsp = mcbsp_ptr[id];
+ int eix = 0, fix = 0;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+
+ if ((!ei) || (!fi)) {
+		printk(KERN_ERR "OMAP_McBSP: Invalid ei and fi params\n");
+ goto txinx_err;
+ }
+
+ if (mcbsp->dma_tx_lch == -1) {
+ printk(KERN_ERR "OMAP_McBSP: Transmitter not started\n");
+ goto txinx_err;
+ }
+
+ if (omap_get_dma_chain_index
+ (mcbsp->dma_tx_lch, &eix, &fix) != 0) {
+ printk(KERN_ERR "OMAP_McBSP: Getting chain index failed\n");
+ goto txinx_err;
+ }
+
+ *ei = eix;
+ *fi = fix;
+
+ return 0;
+
+txinx_err:
+ return -EINVAL;
+}
+EXPORT_SYMBOL(omap2_mcbsp_transmitter_index);
+/*
+ * Get the element index and frame index of receiver
+ * id : McBSP interface ID
+ * ei : element index
+ * fi : frame index
+ */
+int omap2_mcbsp_receiver_index(int id, int *ei, int *fi)
+{
+ struct omap_mcbsp *mcbsp = mcbsp_ptr[id];
+ int eix = 0, fix = 0;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+
+ if ((!ei) || (!fi)) {
+		printk(KERN_ERR "OMAP_McBSP: Invalid ei and fi params\n");
+ goto rxinx_err;
+ }
+
+ /* Check if chain exists */
+ if (mcbsp->dma_rx_lch == -1) {
+ printk(KERN_ERR "OMAP_McBSP: Receiver not started\n");
+ goto rxinx_err;
+ }
+
+ /* Get dma_chain_index */
+ if (omap_get_dma_chain_index
+ (mcbsp->dma_rx_lch, &eix, &fix) != 0) {
+ printk(KERN_ERR "OMAP_McBSP: Getting chain index failed\n");
+ goto rxinx_err;
+ }
+
+ *ei = eix;
+ *fi = fix;
+ return 0;
+
+rxinx_err:
+ return -EINVAL;
+}
+EXPORT_SYMBOL(omap2_mcbsp_receiver_index);
+
+/*
+ * Basic Reset Transmitter
+ * id : McBSP interface number
+ * state : Disable (0)/ Enable (1) the transmitter
+ */
+int omap2_mcbsp_set_xrst(unsigned int id, u8 state)
+{
+ struct omap_mcbsp *mcbsp = mcbsp_ptr[id];
+ void __iomem *io_base;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ io_base = mcbsp->io_base;
+
+ if (state == OMAP_MCBSP_XRST_DISABLE)
+ OMAP_MCBSP_WRITE(io_base, SPCR2,
+ OMAP_MCBSP_READ(io_base, SPCR2) & (~XRST));
+ else
+ OMAP_MCBSP_WRITE(io_base, SPCR2,
+ OMAP_MCBSP_READ(io_base, SPCR2) | XRST);
+ udelay(10);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap2_mcbsp_set_xrst);
+
+/*
+ * Reset Receiver
+ * id : McBSP interface number
+ * state : Disable (0)/ Enable (1) the receiver
+ */
+int omap2_mcbsp_set_rrst(unsigned int id, u8 state)
+{
+ struct omap_mcbsp *mcbsp = mcbsp_ptr[id];
+ void __iomem *io_base;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ io_base = mcbsp->io_base;
+
+ if (state == OMAP_MCBSP_RRST_DISABLE)
+ OMAP_MCBSP_WRITE(io_base, SPCR1,
+ OMAP_MCBSP_READ(io_base, SPCR1) & (~RRST));
+ else
+ OMAP_MCBSP_WRITE(io_base, SPCR1,
+ OMAP_MCBSP_READ(io_base, SPCR1) | RRST);
+ udelay(10);
+ return 0;
+}
+EXPORT_SYMBOL(omap2_mcbsp_set_rrst);
+
+/*
+ * Configure the receiver parameters
+ * id : McBSP Interface ID
+ * rp : DMA Receive parameters
+ */
+int omap2_mcbsp_dma_recv_params(unsigned int id,
+ struct omap_mcbsp_dma_transfer_params *rp)
+{
+ struct omap_mcbsp *mcbsp;
+ void __iomem *io_base;
+ int err, chain_id = -1;
+ struct omap_dma_channel_params rx_params;
+ u32 dt = 0;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+
+ mcbsp = id_to_mcbsp_ptr(id);
+ io_base = mcbsp->io_base;
+ dt = rp->word_length1;
+
+ if (dt == OMAP_MCBSP_WORD_8)
+ rx_params.data_type = OMAP_DMA_DATA_TYPE_S8;
+ else if (dt == OMAP_MCBSP_WORD_16)
+ rx_params.data_type = OMAP_DMA_DATA_TYPE_S16;
+ else if (dt == OMAP_MCBSP_WORD_32)
+ rx_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ else
+ return -EINVAL;
+
+ rx_params.read_prio = DMA_CH_PRIO_HIGH;
+ rx_params.write_prio = DMA_CH_PRIO_HIGH;
+ rx_params.sync_mode = OMAP_DMA_SYNC_ELEMENT;
+ rx_params.src_fi = 0;
+ rx_params.trigger = mcbsp->dma_rx_sync;
+ rx_params.src_or_dst_synch = 0x01;
+ rx_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
+ rx_params.src_ei = 0x0;
+ /* Indexing is always in bytes - so multiply with dt */
+
+ dt = (rx_params.data_type == OMAP_DMA_DATA_TYPE_S8) ? 1 :
+ (rx_params.data_type == OMAP_DMA_DATA_TYPE_S16) ? 2 : 4;
+
+ /* SKIP_FIRST and SKIP_SECOND- skip alternate data in 24 bit mono */
+ if (rp->skip_alt == OMAP_MCBSP_SKIP_SECOND) {
+ rx_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ rx_params.dst_ei = (1);
+ rx_params.dst_fi = (1) + ((-1) * dt);
+ } else if (rp->skip_alt == OMAP_MCBSP_SKIP_FIRST) {
+ rx_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ rx_params.dst_ei = 1 + (-2) * dt;
+ rx_params.dst_fi = 1 + (2) * dt;
+ } else {
+ rx_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+ rx_params.dst_ei = 0;
+ rx_params.dst_fi = 0;
+ }
+
+ mcbsp->rxskip_alt = rp->skip_alt;
+ mcbsp->auto_reset &= ~OMAP_MCBSP_AUTO_RRST;
+ mcbsp->auto_reset |= (rp->auto_reset & OMAP_MCBSP_AUTO_RRST);
+
+ mcbsp->rx_word_length = rx_params.data_type << 0x1;
+ if (rx_params.data_type == 0)
+ mcbsp->rx_word_length = 1;
+
+ mcbsp->rx_callback = rp->callback;
+ /* request for a chain of dma channels for data reception */
+ if (mcbsp->dma_rx_lch == -1) {
+ err = omap_request_dma_chain(id, "McBSP RX",
+ omap2_mcbsp_rx_dma_callback, &chain_id,
+ 2, OMAP_DMA_DYNAMIC_CHAIN, rx_params);
+ if (err < 0) {
+			printk(KERN_ERR "Receive path configuration failed\n");
+ return -EINVAL;
+ }
+ mcbsp->dma_rx_lch = chain_id;
+ mcbsp->rx_dma_chain_state = 0;
+ } else {
+ /* DMA params already set, modify the same!! */
+ err = omap_modify_dma_chain_params(mcbsp->dma_rx_lch,
+ rx_params);
+ if (err < 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(omap2_mcbsp_dma_recv_params);
+
+/*
+ * Configure the transmitter parameters
+ * id : McBSP Interface ID
+ * tp : DMA Transfer parameters
+ */
+
+int omap2_mcbsp_dma_trans_params(unsigned int id,
+ struct omap_mcbsp_dma_transfer_params *tp)
+{
+ struct omap_mcbsp *mcbsp;
+
+ struct omap_dma_channel_params tx_params;
+ int err = 0, chain_id = -1;
+ void __iomem *io_base;
+ u32 dt = 0;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+
+ mcbsp = id_to_mcbsp_ptr(id);
+ io_base = mcbsp->io_base;
+
+ dt = tp->word_length1;
+ if ((dt != OMAP_MCBSP_WORD_8) && (dt != OMAP_MCBSP_WORD_16)
+ && (dt != OMAP_MCBSP_WORD_32))
+ return -EINVAL;
+ if (dt == OMAP_MCBSP_WORD_8)
+ tx_params.data_type = OMAP_DMA_DATA_TYPE_S8;
+ else if (dt == OMAP_MCBSP_WORD_16)
+ tx_params.data_type = OMAP_DMA_DATA_TYPE_S16;
+ else if (dt == OMAP_MCBSP_WORD_32)
+ tx_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ else
+ return -EINVAL;
+
+ tx_params.read_prio = DMA_CH_PRIO_HIGH;
+ tx_params.write_prio = DMA_CH_PRIO_HIGH;
+ tx_params.sync_mode = OMAP_DMA_SYNC_ELEMENT;
+ tx_params.dst_fi = 0;
+ tx_params.trigger = mcbsp->dma_tx_sync;
+ tx_params.src_or_dst_synch = 0;
+ tx_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ tx_params.dst_ei = 0;
+ /* Indexing is always in bytes - so multiply with dt */
+ mcbsp->tx_word_length = tx_params.data_type << 0x1;
+
+ if (tx_params.data_type == 0)
+ mcbsp->tx_word_length = 1;
+ dt = mcbsp->tx_word_length;
+
+ /* SKIP_FIRST and SKIP_SECOND- skip alternate data in 24 bit mono */
+ if (tp->skip_alt == OMAP_MCBSP_SKIP_SECOND) {
+ tx_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ tx_params.src_ei = (1);
+ tx_params.src_fi = (1) + ((-1) * dt);
+ } else if (tp->skip_alt == OMAP_MCBSP_SKIP_FIRST) {
+ tx_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ tx_params.src_ei = 1 + (-2) * dt;
+ tx_params.src_fi = 1 + (2) * dt;
+ } else {
+ tx_params.src_amode = OMAP_DMA_AMODE_POST_INC;
+ tx_params.src_ei = 0;
+ tx_params.src_fi = 0;
+ }
+
+ mcbsp->txskip_alt = tp->skip_alt;
+ mcbsp->auto_reset &= ~OMAP_MCBSP_AUTO_XRST;
+ mcbsp->auto_reset |=
+ (tp->auto_reset & OMAP_MCBSP_AUTO_XRST);
+ mcbsp->tx_callback = tp->callback;
+
+ /* Based on Rjust we can do double indexing DMA params configuration */
+ if (mcbsp->dma_tx_lch == -1) {
+ err = omap_request_dma_chain(id, "McBSP TX",
+ omap2_mcbsp_tx_dma_callback, &chain_id,
+ 2, OMAP_DMA_DYNAMIC_CHAIN, tx_params);
+ if (err < 0) {
+ printk(KERN_ERR
+				"Transmit path configuration failed\n");
+ return -EINVAL;
+ }
+ mcbsp->tx_dma_chain_state = 0;
+ mcbsp->dma_tx_lch = chain_id;
+ } else {
+ /* DMA params already set, modify the same!! */
+ err = omap_modify_dma_chain_params(mcbsp->dma_tx_lch,
+ tx_params);
+ if (err < 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(omap2_mcbsp_dma_trans_params);
+
+/*
+ * Start receiving data on a McBSP interface
+ * id : McBSP interface ID
+ * cbdata : User data to be returned with callback
+ * buf_start_addr : The destination address [physical address]
+ * buf_size : Buffer size
+*/
+
+int omap2_mcbsp_receive_data(unsigned int id, void *cbdata,
+ dma_addr_t buf_start_addr, u32 buf_size)
+{
+ struct omap_mcbsp *mcbsp;
+ void __iomem *io_base;
+ int enable_rx = 0;
+ int e_count = 0;
+ int f_count = 0;
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ io_base = mcbsp->io_base;
+ mcbsp->rx_cb_arg = cbdata;
+
+	/* Auto RRST handling logic - disable the receiver before 1st dma */
+ if ((mcbsp->auto_reset & OMAP_MCBSP_AUTO_RRST) &&
+ (omap_dma_chain_status(mcbsp->dma_rx_lch)
+ == OMAP_DMA_CHAIN_INACTIVE)) {
+ OMAP_MCBSP_WRITE(io_base, SPCR1,
+ OMAP_MCBSP_READ(io_base, SPCR1) & (~RRST));
+ enable_rx = 1;
+ }
+
+ /*
+ * for skip_first and second, we need to set e_count =2,
+ * and f_count = number of frames = number of elements/e_count
+ */
+ e_count = (buf_size / mcbsp->rx_word_length);
+
+ if (mcbsp->rxskip_alt != OMAP_MCBSP_SKIP_NONE) {
+		/*
+		 * The number of frames is the total number of elements
+		 * divided by the element count; with double indexing,
+		 * twice as many elements need to be transferred.
+		 */
+ f_count = e_count;
+ e_count = 2;
+ } else {
+ f_count = 1;
+ }
+	/*
+	 * If the DMA is configured to skip the first element, it has to
+	 * jump backwards, so move the start address one element forward
+	 * here so that the client driver does not need to know about it.
+	 */
+ if (mcbsp->rxskip_alt == OMAP_MCBSP_SKIP_FIRST)
+ buf_start_addr += mcbsp->rx_word_length;
+
+ if (omap_dma_chain_a_transfer(mcbsp->dma_rx_lch,
+ mcbsp->phys_base + OMAP_MCBSP_REG_DRR, buf_start_addr,
+ e_count, f_count, mcbsp) < 0) {
+		printk(KERN_ERR "Buffer chaining failed\n");
+ return -EINVAL;
+ }
+ if (mcbsp->rx_dma_chain_state == 0) {
+ if (omap_start_dma_chain_transfers(mcbsp->dma_rx_lch) < 0)
+ return -EINVAL;
+ mcbsp->rx_dma_chain_state = 1;
+ }
+	/* Auto RRST handling logic - enable the receiver after 1st dma */
+ if (enable_rx &&
+ (omap_dma_chain_status(mcbsp->dma_rx_lch)
+ == OMAP_DMA_CHAIN_ACTIVE))
+ OMAP_MCBSP_WRITE(io_base, SPCR1,
+ OMAP_MCBSP_READ(io_base, SPCR1) | RRST);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap2_mcbsp_receive_data);
+
+/*
+ * Start transmitting data through a McBSP interface
+ * id : McBSP interface ID
+ * cbdata : User data to be returned with callback
+ * buf_start_addr : The source address [This should be physical address]
+ * buf_size : Buffer size
+ */
+int omap2_mcbsp_send_data(unsigned int id, void *cbdata,
+ dma_addr_t buf_start_addr, u32 buf_size)
+{
+ struct omap_mcbsp *mcbsp;
+ void __iomem *io_base;
+ u8 enable_tx = 0;
+ int e_count = 0;
+ int f_count = 0;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ io_base = mcbsp->io_base;
+
+ mcbsp->tx_cb_arg = cbdata;
+
+	/* Auto XRST handling logic - disable the transmitter before 1st dma */
+ if ((mcbsp->auto_reset & OMAP_MCBSP_AUTO_XRST) &&
+ (omap_dma_chain_status(mcbsp->dma_tx_lch)
+ == OMAP_DMA_CHAIN_INACTIVE)) {
+ OMAP_MCBSP_WRITE(io_base, SPCR2,
+ OMAP_MCBSP_READ(io_base, SPCR2) & (~XRST));
+ enable_tx = 1;
+ }
+ /*
+ * for skip_first and second, we need to set e_count =2, and
+ * f_count = number of frames = number of elements/e_count
+ */
+ e_count = (buf_size / mcbsp->tx_word_length);
+ if (mcbsp->txskip_alt != OMAP_MCBSP_SKIP_NONE) {
+		/*
+		 * The number of frames is the total number of elements
+		 * divided by the element count; with double indexing,
+		 * twice as many elements need to be transferred.
+		 */
+ f_count = e_count;
+ e_count = 2;
+ } else {
+ f_count = 1;
+ }
+
+	/*
+	 * If the DMA is configured to skip the first element, it has to
+	 * jump backwards, so move the start address one element forward
+	 * here so that the client driver does not need to know about it.
+	 */
+ if (mcbsp->txskip_alt == OMAP_MCBSP_SKIP_FIRST)
+ buf_start_addr += mcbsp->tx_word_length;
+
+ if (omap_dma_chain_a_transfer(mcbsp->dma_tx_lch,
+ buf_start_addr, mcbsp->phys_base + OMAP_MCBSP_REG_DXR,
+ e_count, f_count, mcbsp) < 0)
+ return -EINVAL;
+
+ if (mcbsp->tx_dma_chain_state == 0) {
+ if (omap_start_dma_chain_transfers(mcbsp->dma_tx_lch) < 0)
+ return -EINVAL;
+ mcbsp->tx_dma_chain_state = 1;
+ }
+
+	/* Auto XRST handling logic - enable the transmitter after 1st dma */
+ if (enable_tx &&
+ (omap_dma_chain_status(mcbsp->dma_tx_lch)
+ == OMAP_DMA_CHAIN_ACTIVE))
+ OMAP_MCBSP_WRITE(io_base, SPCR2,
+ OMAP_MCBSP_READ(io_base, SPCR2) | XRST);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap2_mcbsp_send_data);
+
+void omap2_mcbsp_set_recv_param(struct omap_mcbsp_reg_cfg *mcbsp_cfg,
+ struct omap_mcbsp_cfg_param *rp)
+{
+ mcbsp_cfg->spcr1 = RJUST(rp->justification);
+ mcbsp_cfg->rcr2 = RCOMPAND(rp->reverse_compand) |
+ RDATDLY(rp->data_delay);
+ if (rp->phase == OMAP_MCBSP_FRAME_SINGLEPHASE)
+ mcbsp_cfg->rcr2 = mcbsp_cfg->rcr2 & ~(RPHASE);
+ else
+ mcbsp_cfg->rcr2 = mcbsp_cfg->rcr2 | (RPHASE) |
+ RWDLEN2(rp->word_length2) | RFRLEN2(rp->frame_length2);
+ mcbsp_cfg->rcr1 = RWDLEN1(rp->word_length1) |
+ RFRLEN1(rp->frame_length1);
+ if (rp->fsync_src == OMAP_MCBSP_RXFSYNC_INTERNAL)
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | FSRM;
+ if (rp->clk_mode == OMAP_MCBSP_CLKRXSRC_INTERNAL)
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | CLKRM;
+ if (rp->clk_polarity == OMAP_MCBSP_CLKR_POLARITY_RISING)
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | CLKRP;
+ if (rp->fs_polarity == OMAP_MCBSP_FS_ACTIVE_LOW)
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | FSRP;
+ return;
+}
+
+/*
+ * Set McBSP transmit parameters
+ * mcbsp_cfg : McBSP register configuration
+ * tp : McBSP transmit parameters
+ */
+
+void omap2_mcbsp_set_trans_param(struct omap_mcbsp_reg_cfg *mcbsp_cfg,
+ struct omap_mcbsp_cfg_param *tp)
+{
+ mcbsp_cfg->xcr2 = XCOMPAND(tp->reverse_compand) |
+ XDATDLY(tp->data_delay);
+ if (tp->phase == OMAP_MCBSP_FRAME_SINGLEPHASE)
+ mcbsp_cfg->xcr2 = mcbsp_cfg->xcr2 & ~(XPHASE);
+ else
+ mcbsp_cfg->xcr2 = mcbsp_cfg->xcr2 | (XPHASE) |
+ RWDLEN2(tp->word_length2) | RFRLEN2(tp->frame_length2);
+ mcbsp_cfg->xcr1 = XWDLEN1(tp->word_length1) |
+ XFRLEN1(tp->frame_length1);
+ if (tp->fs_polarity == OMAP_MCBSP_FS_ACTIVE_LOW)
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | FSXP;
+ if (tp->fsync_src == OMAP_MCBSP_TXFSYNC_INTERNAL)
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | FSXM;
+ if (tp->clk_mode == OMAP_MCBSP_CLKTXSRC_INTERNAL)
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | CLKXM;
+ if (tp->clk_polarity == OMAP_MCBSP_CLKX_POLARITY_FALLING)
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | CLKXP;
+ return;
+}
+
+/*
+ * Set McBSP SRG configuration
+ * id : McBSP interface ID
+ * mcbsp_cfg : McBSP register configuration
+ * interface_mode : Master/Slave
+ * param : McBSP SRG and FSG configuration
+ */
+
+void omap2_mcbsp_set_srg_cfg_param(unsigned int id, int interface_mode,
+ struct omap_mcbsp_reg_cfg *mcbsp_cfg,
+ struct omap_mcbsp_srg_fsg_cfg *param)
+{
+ struct omap_mcbsp *mcbsp = mcbsp_ptr[id];
+ void __iomem *io_base;
+ u32 clk_rate, clkgdv;
+ io_base = mcbsp->io_base;
+
+ mcbsp->interface_mode = interface_mode;
+ mcbsp_cfg->srgr1 = FWID(param->pulse_width);
+
+ if (interface_mode == OMAP_MCBSP_MASTER) {
+ clk_rate = clk_get_rate(mcbsp->fclk);
+ clkgdv = clk_rate / (param->sample_rate *
+ (param->bits_per_sample - 1));
+		if (clkgdv > 0xFF)
+ clkgdv = 0xFF;
+ mcbsp_cfg->srgr1 = mcbsp_cfg->srgr1 | CLKGDV(clkgdv);
+ }
+ if (param->dlb)
+ mcbsp_cfg->spcr1 = mcbsp_cfg->spcr1 & ~(ALB);
+
+ if (param->sync_mode == OMAP_MCBSP_SRG_FREERUNNING)
+ mcbsp_cfg->spcr2 = mcbsp_cfg->spcr2 | FREE;
+	mcbsp_cfg->srgr2 = FPER(param->period) | (param->fsgm ? FSGM : 0);
+
+ switch (param->srg_src) {
+
+ case OMAP_MCBSP_SRGCLKSRC_CLKS:
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 & ~(SCLKME);
+ mcbsp_cfg->srgr2 = mcbsp_cfg->srgr2 & ~(CLKSM);
+		/*
+		 * McBSP master operation at low voltage is only possible
+		 * with CLKSP = 0. In master mode, if the client driver
+		 * tries to configure the input clock polarity as falling
+		 * edge, force it to rising.
+		 */
+
+ if ((param->polarity == OMAP_MCBSP_CLKS_POLARITY_RISING) ||
+ (interface_mode == OMAP_MCBSP_MASTER))
+ mcbsp_cfg->srgr2 = mcbsp_cfg->srgr2 & ~(CLKSP);
+ else
+ mcbsp_cfg->srgr2 = mcbsp_cfg->srgr2 | (CLKSP);
+ break;
+
+
+ case OMAP_MCBSP_SRGCLKSRC_FCLK:
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 & ~(SCLKME);
+ mcbsp_cfg->srgr2 = mcbsp_cfg->srgr2 | (CLKSM);
+
+ break;
+
+ case OMAP_MCBSP_SRGCLKSRC_CLKR:
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | (SCLKME);
+ mcbsp_cfg->srgr2 = mcbsp_cfg->srgr2 & ~(CLKSM);
+ if (param->polarity == OMAP_MCBSP_CLKR_POLARITY_FALLING)
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 & ~(CLKRP);
+ else
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | (CLKRP);
+
+ break;
+
+ case OMAP_MCBSP_SRGCLKSRC_CLKX:
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | (SCLKME);
+ mcbsp_cfg->srgr2 = mcbsp_cfg->srgr2 | (CLKSM);
+
+ if (param->polarity == OMAP_MCBSP_CLKX_POLARITY_RISING)
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 & ~(CLKXP);
+ else
+ mcbsp_cfg->pcr0 = mcbsp_cfg->pcr0 | (CLKXP);
+ break;
+
+ }
+ if (param->sync_mode == OMAP_MCBSP_SRG_FREERUNNING)
+ mcbsp_cfg->srgr2 = mcbsp_cfg->srgr2 & ~(GSYNC);
+ else if (param->sync_mode == OMAP_MCBSP_SRG_RUNNING)
+ mcbsp_cfg->srgr2 = mcbsp_cfg->srgr2 | (GSYNC);
+
+ mcbsp_cfg->xccr = OMAP_MCBSP_READ(io_base, XCCR);
+ if (param->dlb)
+ mcbsp_cfg->xccr = mcbsp_cfg->xccr | (DILB);
+ mcbsp_cfg->rccr = OMAP_MCBSP_READ(io_base, RCCR);
+
+ return;
+}
+
+/*
+ * configure the McBSP registers
+ * id : McBSP interface ID
+ * interface_mode : Master/Slave
+ * rp : McBSP recv parameters
+ * tp : McBSP transmit parameters
+ * param : McBSP SRG and FSG configuration
+ */
+void omap2_mcbsp_params_cfg(unsigned int id, int interface_mode,
+ struct omap_mcbsp_cfg_param *rp,
+ struct omap_mcbsp_cfg_param *tp,
+ struct omap_mcbsp_srg_fsg_cfg *param)
+{
+ if (rp)
+ omap2_mcbsp_set_recv_param(&mcbsp_cfg, rp);
+ if (tp)
+ omap2_mcbsp_set_trans_param(&mcbsp_cfg, tp);
+ if (param)
+ omap2_mcbsp_set_srg_cfg_param(id,
+ interface_mode, &mcbsp_cfg, param);
+ omap_mcbsp_config(id, &mcbsp_cfg);
+
+ return;
+}
+EXPORT_SYMBOL(omap2_mcbsp_params_cfg);
+
+
static int __init omap2_mcbsp_init(void)
{
if (cpu_is_omap2420())
omap_mcbsp_count = OMAP2420_MCBSP_PDATA_SZ;
if (cpu_is_omap2430())
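
To put the new McBSP data-path helpers in context, here is a rough transmit-side sketch: configure the DMA transfer parameters once, then queue buffers. It assumes the caller already owns a requested McBSP id and a DMA-able buffer; the word length, callback prototype and error handling are illustrative, taken from the field names used in this patch rather than from a validated configuration.

	/* Hypothetical transmit sequence built on the interfaces added above. */
	static void example_tx_done(u16 ch_status, void *arg)
	{
		/* check ch_status for DMA errors, wake up the waiting context */
	}

	static int example_mcbsp_send(unsigned int id, dma_addr_t buf, u32 size)
	{
		struct omap_mcbsp_dma_transfer_params tp = {
			.word_length1	= OMAP_MCBSP_WORD_16,
			.skip_alt	= OMAP_MCBSP_SKIP_NONE,
			.auto_reset	= OMAP_MCBSP_AUTO_XRST,
			.callback	= example_tx_done,
		};
		int ret;

		ret = omap2_mcbsp_dma_trans_params(id, &tp);
		if (ret)
			return ret;

		/* XRST is released automatically after the first queued DMA */
		return omap2_mcbsp_send_data(id, NULL, buf, size);
	}
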
diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c
index cc27a6bbfafd..334bce573842 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.c
+++ b/arch/arm/mach-omap2/mmc-twl4030.c
@@ -25,6 +25,11 @@
#include <plat/mmc.h>
#include <plat/board.h>
+#ifdef CONFIG_TIWLAN_SDIO
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+#endif
+
#include "mmc-twl4030.h"
#include <linux/i2c/twl.h>
@@ -40,11 +45,6 @@ static u16 control_mmc1;
/* Hack : Phoenix registers*/
#define PHOENIX_CFG_INPUT_PUPD3 0xF2
-#define MMC_GRP 0x68
-#define MMC_TRANS 0x69
-#define MMC_STATE 0x6a
-#define MMC_VOLTAGE 0x6b
-
static struct twl_mmc_controller {
struct omap_mmc_platform_data *mmc;
@@ -62,39 +62,36 @@ static struct twl_mmc_controller {
static int twl_mmc_card_detect(int irq)
{
unsigned i;
- if (!cpu_is_omap44xx()) {
- for (i = 0; i < ARRAY_SIZE(hsmmc); i++) {
- struct omap_mmc_platform_data *mmc;
+ u8 read_reg = 0;
+ unsigned res;
- mmc = hsmmc[i].mmc;
- if (!mmc)
- continue;
- if (irq != mmc->slots[0].card_detect_irq)
- continue;
+ for (i = 0; i < ARRAY_SIZE(hsmmc); i++) {
+ struct omap_mmc_platform_data *mmc;
- /* NOTE: assumes card detect signal is active-low */
- return !gpio_get_value_cansleep
- (mmc->slots[0].switch_pin);
- }
- return -ENOSYS;
- } else {
- /* BIT0 of REG_SIMCTRL
- * 0 - Card not present
- * 1 - Card present
- */
- u8 read_reg;
- unsigned res;
+ mmc = hsmmc[i].mmc;
+ if (!mmc)
+ continue;
+ if (irq != mmc->slots[0].card_detect_irq)
+ continue;
- res = twl_i2c_read_u8(TWL4030_MODULE_INTBR,
- &read_reg, PHOENIX_MMC_CTRL);
- if (res < 0) {
- printk(KERN_ERR"%s: i2c_read fail at %x \n",
- __func__, PHOENIX_MMC_CTRL);
- return -1;
+ /* NOTE: assumes card detect signal is active-low */
+ if (!cpu_is_omap44xx()) {
+ return !gpio_get_value_cansleep
+ (mmc->slots[0].switch_pin);
} else {
- return read_reg & 0x1;
+ /* BIT0 of REG_MMC_CTRL
+ * 0 - Card not present
+ * 1 - Card present
+ */
+ if (mmc->slots[0].nonremovable)
+ return 1;
+ res = twl_i2c_read_u8(TWL4030_MODULE_INTBR,
+ &read_reg, PHOENIX_MMC_CTRL);
+ if (res >= 0)
+ return read_reg & 0x1;
}
}
+ return -ENOSYS;
}
static int twl_mmc_get_ro(struct device *dev, int slot)
@@ -121,7 +118,6 @@ static int twl_mmc_late_init(struct device *dev)
struct omap_mmc_platform_data *mmc = dev->platform_data;
int ret = 0;
int i;
- u8 regs;
/* MMC/SD/SDIO doesn't require a card detect switch */
if (!cpu_is_omap44xx()) {
@@ -189,19 +185,6 @@ static int twl_mmc_late_init(struct device *dev)
}
/* Configure Phoenix for MMC1 Card detect */
if (i == 0) {
- regs = 0x01;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_GRP);
- regs = 0x03;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_TRANS);
- regs = 0x21;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_STATE);
- regs = 0x15;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_VOLTAGE);
- msleep(200);
twl_i2c_write_u8(TWL4030_MODULE_INTBR,
0x04, PHOENIX_MMC_CTRL);
twl_i2c_write_u8(TWL4030_MODULE_INTBR,
@@ -278,7 +261,6 @@ static int twl_mmc1_set_power(struct device *dev, int slot, int power_on,
int ret = 0;
struct twl_mmc_controller *c = &hsmmc[0];
struct omap_mmc_platform_data *mmc = dev->platform_data;
- u8 regs;
/*
* Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the
@@ -324,40 +306,7 @@ static int twl_mmc1_set_power(struct device *dev, int slot, int power_on,
OMAP4_MMC1_PWRDWNZ);
}
omap_ctrl_writel(reg, control_pbias_offset);
-
- /* Hack need to fix it */
- if ((vdd == 0x12) || (vdd == 0x7)) {
- regs = 0x01;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_GRP);
- regs = 0x03;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_TRANS);
- regs = 0x21;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_STATE);
- regs = 0x15;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_VOLTAGE);
- } else {
- regs = 0x01;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_GRP);
- regs = 0x03;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_TRANS);
- regs = 0x00;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_STATE);
- regs = 0x09;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_STATE);
- regs = 0x15;
- twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
- regs, MMC_VOLTAGE);
- }
- if (!cpu_is_omap44xx())
- ret = mmc_regulator_set_ocr(c->vcc, vdd);
+ ret = mmc_regulator_set_ocr(c->vcc, vdd);
/* 100ms delay required for PBIAS configuration */
msleep(100);
@@ -391,9 +340,7 @@ static int twl_mmc1_set_power(struct device *dev, int slot, int power_on,
OMAP4_MMC1_PWRDWNZ);
}
omap_ctrl_writel(reg, control_pbias_offset);
-
- if (!cpu_is_omap44xx())
- ret = mmc_regulator_set_ocr(c->vcc, 0);
+ ret = mmc_regulator_set_ocr(c->vcc, 0);
/* 100ms delay required for PBIAS configuration */
msleep(100);
@@ -420,8 +367,6 @@ static int twl_mmc23_set_power(struct device *dev, int slot, int power_on, int v
struct omap_mmc_platform_data *mmc = dev->platform_data;
int i;
- if (cpu_is_omap44xx())
- return 0;
for (i = 1; i < ARRAY_SIZE(hsmmc); i++) {
if (mmc == hsmmc[i].mmc) {
@@ -453,13 +398,15 @@ static int twl_mmc23_set_power(struct device *dev, int slot, int power_on, int v
* chips/cards need an interface voltage rail too.
*/
if (power_on) {
- /* only MMC2 supports a CLKIN */
- if (mmc->slots[0].internal_clock) {
- u32 reg;
+ if (!cpu_is_omap44xx()) {
+ /* only MMC2 supports a CLKIN */
+ if (mmc->slots[0].internal_clock) {
+ u32 reg;
- reg = omap_ctrl_readl(control_devconf1_offset);
- reg |= OMAP2_MMCSDIO2ADPCLKISEL;
- omap_ctrl_writel(reg, control_devconf1_offset);
+ reg = omap_ctrl_readl(control_devconf1_offset);
+ reg |= OMAP2_MMCSDIO2ADPCLKISEL;
+ omap_ctrl_writel(reg, control_devconf1_offset);
+ }
}
ret = mmc_regulator_set_ocr(c->vcc, vdd);
/* enable interface voltage rail, if needed */
@@ -538,6 +485,29 @@ static int twl_mmc23_set_sleep(struct device *dev, int slot, int sleep, int vdd,
static struct omap_mmc_platform_data *hsmmc_data[OMAP44XX_NR_MMC] __initdata;
+#ifdef CONFIG_TIWLAN_SDIO
+static struct sdio_embedded_func wifi_func_array[] = {
+ {
+ .f_class = SDIO_CLASS_BT_A,
+ .f_maxblksize = 512,
+ },
+ {
+ .f_class = SDIO_CLASS_WLAN,
+ .f_maxblksize = 512,
+ },
+};
+
+static struct embedded_sdio_data omap_wifi_emb_data = {
+ .cis = {
+ .vendor = SDIO_VENDOR_ID_TI,
+ .device = SDIO_DEVICE_ID_TI_WL12xx,
+ .blksize = 512,
+ .max_dtr = 24000000,
+ },
+ .funcs = wifi_func_array,
+};
+#endif
+
void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
{
struct twl4030_hsmmc_info *c;
@@ -591,6 +561,16 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
else
snprintf(twl->name, ARRAY_SIZE(twl->name),
"mmc%islot%i", c->mmc, 1);
+
+#ifdef CONFIG_TIWLAN_SDIO
+ if (c->mmc == CONFIG_TIWLAN_MMC_CONTROLLER) {
+ mmc->slots[0].embedded_sdio = &omap_wifi_emb_data;
+ mmc->slots[0].register_status_notify =
+ &omap_wifi_status_register;
+ mmc->slots[0].card_detect = &omap_wifi_status;
+ }
+#endif
+
mmc->slots[0].name = twl->name;
mmc->nr_slots = 1;
mmc->slots[0].wires = c->wires;
@@ -622,6 +602,12 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
mmc->slots[0].card_detect_irq = 384;
else
mmc->slots[0].card_detect_irq = 0;
+ if (c->cover_only)
+ mmc->slots[0].get_cover_state =
+ twl_mmc_get_cover_state;
+ else
+ mmc->slots[0].card_detect =
+ twl_mmc_card_detect;
}
mmc->get_context_loss_count =
@@ -676,6 +662,9 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
/* off-chip level shifting, or none */
mmc->slots[0].set_power = twl_mmc23_set_power;
mmc->slots[0].set_sleep = twl_mmc23_set_sleep;
+#ifdef CONFIG_TIWLAN_SDIO
+ mmc->slots[0].ocr_mask = MMC_VDD_165_195;
+#endif
break;
default:
pr_err("MMC%d configuration not supported!\n", c->mmc);
diff --git a/arch/arm/mach-omap2/mmc-twl4030.h b/arch/arm/mach-omap2/mmc-twl4030.h
index a47e68563fb6..1da38a26c2bf 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.h
+++ b/arch/arm/mach-omap2/mmc-twl4030.h
@@ -21,6 +21,12 @@ struct twl4030_hsmmc_info {
int ocr_mask; /* temporary HACK */
};
+#ifdef CONFIG_TIWLAN_SDIO
+int omap_wifi_status_register(void (*callback)(int card_present,
+ void *dev_id), void *dev_id);
+int omap_wifi_status(int irq);
+#endif
+
#if defined(CONFIG_REGULATOR) && \
(defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE))
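
For reference, the omap_wifi_status_register()/omap_wifi_status() declarations added above imply board-side glue along the following lines. The real implementation lives in the board WLAN support file; this is only a sketch with made-up variable and helper names showing how the notifier and the status hook typically interact.

	/* Illustrative implementations of the hooks declared above. */
	#include <linux/errno.h>

	static void (*wifi_status_cb)(int card_present, void *dev_id);
	static void *wifi_status_cb_devid;
	static int wifi_card_present;

	int omap_wifi_status_register(void (*callback)(int card_present,
					void *dev_id), void *dev_id)
	{
		if (wifi_status_cb)
			return -EBUSY;
		wifi_status_cb = callback;
		wifi_status_cb_devid = dev_id;
		return 0;
	}

	int omap_wifi_status(int irq)
	{
		return wifi_card_present;
	}

	/*
	 * The WLAN driver calls something like this around power transitions
	 * so that the MMC core rescans the (non-removable) SDIO card.
	 */
	static int example_wifi_set_carddetect(int present)
	{
		wifi_card_present = present;
		if (wifi_status_cb)
			wifi_status_cb(present, wifi_status_cb_devid);
		return 0;
	}
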
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index 66499115ac54..605a39fb4b22 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -27,37 +27,20 @@ static DECLARE_COMPLETION(cpu_killed);
static inline void cpu_enter_lowpower(void)
{
- unsigned int v;
-
flush_cache_all();
- /* FIXME: check L2 state and see if the l2 flush is necessary */
-
- asm volatile(
- " mcr p15, 0, %1, c7, c5, 0\n"
- " mcr p15, 0, %1, c7, c10, 4\n"
- /* FIXME: Need to use secure API for AUX control */
-
- " mrc p15, 0, %0, c1, c0, 0\n"
- " bic %0, %0, #0x04\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- : "=&r" (v)
- : "r" (0)
- : "cc");
+ /* FIXME: Code for OFF /RET */
}
static inline void cpu_leave_lowpower(void)
{
- unsigned int v;
-
- asm volatile(
- "mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, #0x04\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- /* FIXME: Need to use secure API for AUX control */
+ struct powerdomain *cpu1_pd;
+ u32 scu_pwr_st;
- : "=&r" (v)
- :
- : "cc");
+ scu_pwr_st = omap_readl(0x48240008);
+ scu_pwr_st &= ~0x8;
+ omap_writel(scu_pwr_st, 0x48240008);
+ cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
+ pwrdm_set_next_pwrst(cpu1_pd, PWRDM_POWER_ON);
}
static inline void omap_do_lowpower(unsigned int cpu)
@@ -65,29 +48,17 @@ static inline void omap_do_lowpower(unsigned int cpu)
u32 scu_pwr_st;
struct powerdomain *cpu1_pd;
-
- if(omap_modify_auxcoreboot0(0x0, 0x200) != 0x0) {
+	if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0)
printk(KERN_CRIT "Secure clear status failed\n");
- BUG();
- }
- /*
- * FIXME: Hook up the omap low power here.
- */
for (;;) {
- /* Program the possible low power state here */
-
- /* FIXME: On the ES2.0 silicon SCU power state
- * register can be accessed using only secure API
- * Also use ioremap instead of omap_read/write here
- */
- /* set SCU power status register to dormant mode */
+ cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
+ pwrdm_set_next_pwrst(cpu1_pd, PWRDM_POWER_RET);
scu_pwr_st = omap_readl(0x48240008);
scu_pwr_st |= 0x8;
omap_writel(scu_pwr_st, 0x48240008);
- cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
- pwrdm_set_next_pwrst(cpu1_pd, PWRDM_POWER_RET);
- dsb();
+ isb();
+ wmb();
asm volatile("wfi\n"
:
:
@@ -97,11 +68,6 @@ static inline void omap_do_lowpower(unsigned int cpu)
/*
* OK, proper wakeup, we're done
*/
- pwrdm_set_next_pwrst(cpu1_pd, PWRDM_POWER_ON);
- scu_pwr_st = omap_readl(0x48240008);
- scu_pwr_st &= ~0x8;
- omap_writel(scu_pwr_st, 0x48240008);
- local_irq_enable();
break;
}
#ifdef DEBUG
diff --git a/arch/arm/mach-omap2/omap4-iommu.c b/arch/arm/mach-omap2/omap4-iommu.c
new file mode 100644
index 000000000000..6225616f4e9e
--- /dev/null
+++ b/arch/arm/mach-omap2/omap4-iommu.c
@@ -0,0 +1,110 @@
+/*
+ * omap iommu: omap4 device registration
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Written by Hari Kanigeri <h-kanigeri2@ti.com>
+ *
+ * Added support for OMAP4. This is based on original file
+ * omap3-iommu.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+
+#include <plat/iommu.h>
+#include <plat/irqs.h>
+
+#define OMAP4_MMU1_BASE 0x55082000
+#define OMAP4_MMU2_BASE 0x4A066000
+
+#define OMAP4_MMU1_IRQ INT_44XX_DUCATI_MMU_IRQ
+#define OMAP4_MMU2_IRQ INT_44XX_DSP_MMU
+
+static unsigned long iommu_base[] __initdata = {
+ OMAP4_MMU1_BASE,
+ OMAP4_MMU2_BASE,
+};
+
+static int iommu_irq[] __initdata = {
+ OMAP4_MMU1_IRQ,
+ OMAP4_MMU2_IRQ,
+};
+
+static const struct iommu_platform_data omap4_iommu_pdata[] __initconst = {
+ {
+ .name = "ducati",
+ .nr_tlb_entries = 32,
+ },
+#if defined(CONFIG_MPU_TESLA_IOMMU)
+ {
+ .name = "tesla",
+ .nr_tlb_entries = 32,
+ },
+#endif
+};
+#define NR_IOMMU_DEVICES ARRAY_SIZE(omap4_iommu_pdata)
+
+static struct platform_device *omap4_iommu_pdev[NR_IOMMU_DEVICES];
+
+static int __init omap4_iommu_init(void)
+{
+ int i, err;
+
+ for (i = 0; i < NR_IOMMU_DEVICES; i++) {
+ struct platform_device *pdev;
+ struct resource res[2];
+
+ pdev = platform_device_alloc("omap-iommu", i);
+ if (!pdev) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(res, 0, sizeof(res));
+ res[0].start = iommu_base[i];
+ res[0].end = iommu_base[i] + MMU_REG_SIZE - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = res[1].end = iommu_irq[i];
+ res[1].flags = IORESOURCE_IRQ;
+
+ err = platform_device_add_resources(pdev, res,
+ ARRAY_SIZE(res));
+ if (err)
+ goto err_out;
+ err = platform_device_add_data(pdev, &omap4_iommu_pdata[i],
+ sizeof(omap4_iommu_pdata[0]));
+ if (err)
+ goto err_out;
+ err = platform_device_add(pdev);
+ if (err)
+ goto err_out;
+ omap4_iommu_pdev[i] = pdev;
+ }
+ return 0;
+
+err_out:
+ while (i--)
+ platform_device_put(omap4_iommu_pdev[i]);
+ return err;
+}
+module_init(omap4_iommu_init);
+
+static void __exit omap4_iommu_exit(void)
+{
+ int i;
+
+ for (i = 0; i < NR_IOMMU_DEVICES; i++)
+ platform_device_unregister(omap4_iommu_pdev[i]);
+}
+module_exit(omap4_iommu_exit);
+
+MODULE_AUTHOR("Hiroshi DOYU, Hari Kanigeri");
+MODULE_DESCRIPTION("omap iommu: omap4 device registration");
+MODULE_LICENSE("GPL v2");
+
diff --git a/arch/arm/mach-omap2/omap44xx-smc.S b/arch/arm/mach-omap2/omap44xx-smc.S
index 89bb2b141473..f61c7771ca47 100644
--- a/arch/arm/mach-omap2/omap44xx-smc.S
+++ b/arch/arm/mach-omap2/omap44xx-smc.S
@@ -27,6 +27,6 @@ ENTRY(omap_smc1)
mov r12, r0
mov r0, r1
dsb
- smc
+ smc #0
ldmfd sp!, {r2-r12, pc}
END(omap_smc1)
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 77f0cd29869f..dc2e8cfbd5f1 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -51,13 +51,11 @@ extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl,
extern void omap34xx_cpu_suspend(u32 *addr, int save_state);
extern void save_secure_ram_context(u32 *addr);
extern void omap3_save_scratchpad_contents(void);
-extern void omap44xx_cpu_suspend(void);
extern unsigned int omap24xx_idle_loop_suspend_sz;
extern unsigned int omap34xx_suspend_sz;
extern unsigned int save_secure_ram_context_sz;
extern unsigned int omap24xx_cpu_suspend_sz;
extern unsigned int omap34xx_cpu_suspend_sz;
-extern unsigned int omap44xx_cpu_suspend_sz;
#endif
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index d375ab45a133..d13aff317f61 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -18,7 +18,6 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <plat/control.h>
-#include <plat/sram.h>
#include <plat/clockdomain.h>
#include <plat/powerdomain.h>
#include "prm.h"
@@ -38,8 +37,6 @@ struct power_state {
static LIST_HEAD(pwrst_list);
-int (*_omap_sram_idle)(void);
-
static struct powerdomain *cpu0_pwrdm, *cpu1_pwrdm, *mpu_pwrdm;
/* This sets pwrdm state (other than mpu & core. Currently only ON &
@@ -114,13 +111,12 @@ static int omap4_pm_suspend(void)
if (set_pwrdm_state(pwrst->pwrdm, PWRDM_POWER_RET))
goto restore;
- if (_omap_sram_idle)
- _omap_sram_idle();
- else
- asm volatile("wfi\n"
- :
- :
- : "memory", "cc");
+ isb();
+ wmb();
+ asm volatile("wfi\n"
+ :
+ :
+ : "memory", "cc");
restore:
/* Restore next_pwrsts */
list_for_each_entry(pwrst, &pwrst_list, node) {
@@ -215,14 +211,6 @@ static int __attribute__ ((unused)) __init
return 0;
}
-#ifdef CONFIG_PM
-void omap_push_sram_idle(void)
-{
- _omap_sram_idle = omap_sram_push(omap44xx_cpu_suspend,
- omap44xx_cpu_suspend_sz);
-}
-#endif
-
void __raw_rmw_reg_bits(u32 mask, u32 bits, const volatile void __iomem *addr)
{
u32 v;
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 30e23e12cc9e..ed19c1e5bf26 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -27,6 +27,8 @@
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
+#include <plat/omap-serial.h>
+
#include <plat/common.h>
#include <plat/board.h>
#include <plat/clock.h>
@@ -544,6 +546,7 @@ static void omap_uart_block_sleep(struct omap_uart_state *uart)
#define DEV_CREATE_FILE(dev, attr)
#endif /* CONFIG_PM */
+#ifdef CONFIG_SERIAL_8250
/*
* Override the default 8250 read handler: mem_serial_in()
* Empty RX fifo read causes an abort on omap3630 and omap4
@@ -560,21 +563,7 @@ static unsigned int serial_in_override(struct uart_port *up, int offset)
}
return __serial_read_reg(up, offset);
}
-
-static void serial_out_override(struct uart_port *up, int offset, int value)
-{
- unsigned int status, tmout = 10000;
-
- /* Wait up to 10ms for the character(s) to be sent. */
- do {
- status = __serial_read_reg(up, UART_LSR);
- if (--tmout == 0)
- break;
- udelay(1);
- } while (!(status & UART_LSR_THRE));
-
- __serial_write_reg(up, offset, value);
-}
+#endif
void __init omap_serial_early_init(void)
{
@@ -625,7 +614,21 @@ void __init omap_serial_early_init(void)
void __init omap_serial_init_port(int port)
{
struct omap_uart_state *uart;
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ void *pdata = NULL;
+ u32 pdata_size = 0;
char *name;
+#ifdef CONFIG_SERIAL_8250
+ struct plat_serial8250_port ports[2] = {
+ {},
+ {.flags = 0},
+ };
+ struct plat_serial8250_port *p = &ports[0];
+#endif
+#ifdef CONFIG_SERIAL_OMAP
+ struct omap_uart_port_info omap_up;
+#endif
BUG_ON(port < 0);
BUG_ON(port >= num_uarts);
@@ -634,20 +637,10 @@ void __init omap_serial_init_port(int port)
if (port == uart->num)
break;
{
-
- struct omap_hwmod *oh = uart->oh;
- struct omap_device *od;
- void *pdata = NULL;
- u32 pdata_size = 0;
-
- struct plat_serial8250_port ports[2] = {
- {},
- {.flags = 0},
- };
- struct plat_serial8250_port *p = &ports[0];
-
- name = "serial8250";
+ oh = uart->oh;
uart->dma_enabled = 0;
+#ifdef CONFIG_SERIAL_8250
+ name = "serial8250";
/*
* !! 8250 driver does not use standard IORESOURCE* It
@@ -673,13 +666,25 @@ void __init omap_serial_init_port(int port)
* omap3xxx: Never read empty UART fifo on UARTs
* with IP rev >=0x52
*/
- if (cpu_is_omap44xx()) {
+ if (cpu_is_omap44xx())
p->serial_in = serial_in_override;
- p->serial_out = serial_out_override;
- }
pdata = &ports[0];
pdata_size = 2 * sizeof(struct plat_serial8250_port);
+#endif
+#ifdef CONFIG_SERIAL_OMAP
+ name = DRIVER_NAME;
+
+ omap_up.dma_enabled = uart->dma_enabled;
+ omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
+ omap_up.mapbase = oh->slaves[0]->addr->pa_start;
+ omap_up.membase = oh->_rt_va;
+ omap_up.irqflags = IRQF_SHARED;
+ omap_up.flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+
+ pdata = &omap_up;
+ pdata_size = sizeof(struct omap_uart_port_info);
+#endif
if (WARN_ON(!oh))
return;
@@ -727,7 +732,6 @@ void __init omap_serial_init_port(int port)
device_init_wakeup(&od->pdev.dev, true);
DEV_CREATE_FILE(&od->pdev.dev, &dev_attr_sleep_timeout);
}
-
}
}
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
deleted file mode 100644
index 7181a95b0252..000000000000
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/sleep44xx.S
- *
- * (C) Copyright 2010
- * Texas Instruments
- * Rajendra Nayak <rnayak@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR /PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-/*
- * Forces OMAP into idle state
- *
- * omap44xx_suspend() - This bit of code just executes the WFI
- * for normal idles.
- *
- * Note: This code get's copied to internal SRAM at boot. When the OMAP
- * wakes up it continues execution at the point it went to sleep.
- */
-ENTRY(omap44xx_cpu_suspend)
- stmfd sp!, {r0-r12, lr} @ save registers on stack
-loop:
- /*b loop*/ @Enable to debug by stepping through code
- /* Data memory barrier and Data sync barrier */
- /* FIXME: check whther we need this code executed from SRAM ? */
- dsb
-
- wfi @ wait for interrupt
-
- ldmfd sp!, {r0-r12, pc} @ restore regs and return
-ENTRY(omap44xx_cpu_suspend_sz)
- .word . - omap44xx_cpu_suspend
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 7148e53e6078..8ebffdd6fcff 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -157,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-fa_dma_inv_range:
+ENTRY(fa_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
@@ -180,7 +180,7 @@ fa_dma_inv_range:
* - start - virtual start address
* - end - virtual end address
*/
-fa_dma_clean_range:
+ENTRY(fa_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -241,5 +241,7 @@ ENTRY(fa_cache_fns)
.long fa_flush_kern_dcache_area
.long fa_dma_map_area
.long fa_dma_unmap_area
+ .long fa_dma_inv_range
+ .long fa_dma_clean_range
.long fa_dma_flush_range
.size fa_cache_fns, . - fa_cache_fns
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2ff3c599fee..6df52dc014be 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -84,6 +84,20 @@ ENTRY(v3_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_dma_inv_range)
+ /* FALLTHROUGH */
+
+/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -94,6 +108,17 @@ ENTRY(v3_flush_kern_dcache_area)
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_dma_clean_range)
mov pc, lr
/*
@@ -104,7 +129,7 @@ ENTRY(v3_dma_flush_range)
*/
ENTRY(v3_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
- bne v3_dma_flush_range
+ bne v3_dma_inv_range
/* FALLTHROUGH */
/*
@@ -130,5 +155,7 @@ ENTRY(v3_cache_fns)
.long v3_flush_kern_dcache_area
.long v3_dma_map_area
.long v3_dma_unmap_area
+ .long v3_dma_inv_range
+ .long v3_dma_clean_range
.long v3_dma_flush_range
.size v3_cache_fns, . - v3_cache_fns
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 4810f7e3e813..df3b423713b9 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -94,6 +94,20 @@ ENTRY(v4_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4_dma_inv_range)
+ /* FALLTHROUGH */
+
+/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -106,6 +120,17 @@ ENTRY(v4_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
#endif
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4_dma_clean_range)
mov pc, lr
/*
@@ -116,7 +141,7 @@ ENTRY(v4_dma_flush_range)
*/
ENTRY(v4_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
- bne v4_dma_flush_range
+ bne v4_dma_inv_range
/* FALLTHROUGH */
/*
@@ -142,5 +167,7 @@ ENTRY(v4_cache_fns)
.long v4_flush_kern_dcache_area
.long v4_dma_map_area
.long v4_dma_unmap_area
+ .long v4_dma_inv_range
+ .long v4_dma_clean_range
.long v4_dma_flush_range
.size v4_cache_fns, . - v4_cache_fns
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index df8368afa102..32e7a7448496 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -173,7 +173,7 @@ ENTRY(v4wb_coherent_user_range)
* - start - virtual start address
* - end - virtual end address
*/
-v4wb_dma_inv_range:
+ENTRY(v4wb_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -194,7 +194,7 @@ v4wb_dma_inv_range:
* - start - virtual start address
* - end - virtual end address
*/
-v4wb_dma_clean_range:
+ENTRY(v4wb_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -252,5 +252,7 @@ ENTRY(v4wb_cache_fns)
.long v4wb_flush_kern_dcache_area
.long v4wb_dma_map_area
.long v4wb_dma_unmap_area
+ .long v4wb_dma_inv_range
+ .long v4wb_dma_clean_range
.long v4wb_dma_flush_range
.size v4wb_cache_fns, . - v4wb_cache_fns
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 45c70312f43b..3d8dad5b2650 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -142,12 +142,23 @@ ENTRY(v4wt_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-v4wt_dma_inv_range:
+ENTRY(v4wt_dma_inv_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
@@ -196,5 +207,7 @@ ENTRY(v4wt_cache_fns)
.long v4wt_flush_kern_dcache_area
.long v4wt_dma_map_area
.long v4wt_dma_unmap_area
+ .long v4wt_dma_inv_range
+ .long v4wt_dma_clean_range
.long v4wt_dma_flush_range
.size v4wt_cache_fns, . - v4wt_cache_fns
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index b9f2cbdc4a52..cdc8dd6ce56d 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -195,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_area)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v6_dma_inv_range:
+ENTRY(v6_dma_inv_range)
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
@@ -231,7 +231,7 @@ v6_dma_inv_range:
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v6_dma_clean_range:
+ENTRY(v6_dma_clean_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_SMP
@@ -311,5 +311,7 @@ ENTRY(v6_cache_fns)
.long v6_flush_kern_dcache_area
.long v6_dma_map_area
.long v6_dma_unmap_area
+ .long v6_dma_inv_range
+ .long v6_dma_clean_range
.long v6_dma_flush_range
.size v6_cache_fns, . - v6_cache_fns
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index bcd64f265870..69d0f01ac38a 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -167,7 +167,11 @@ ENTRY(v7_coherent_user_range)
cmp r0, r1
blo 1b
mov r0, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
+#else
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
+#endif
dsb
isb
mov pc, lr
@@ -216,7 +220,7 @@ ENDPROC(v7_flush_kern_dcache_area)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v7_dma_inv_range:
+ENTRY(v7_dma_inv_range)
dcache_line_size r2, r3
sub r3, r2, #1
tst r0, r3
@@ -240,7 +244,7 @@ ENDPROC(v7_dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v7_dma_clean_range:
+ENTRY(v7_dma_clean_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
@@ -309,5 +313,7 @@ ENTRY(v7_cache_fns)
.long v7_flush_kern_dcache_area
.long v7_dma_map_area
.long v7_dma_unmap_area
+ .long v7_dma_inv_range
+ .long v7_dma_clean_range
.long v7_dma_flush_range
.size v7_cache_fns, . - v7_cache_fns
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 64daef2173bd..940d44b3c59d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -614,3 +614,86 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#ifdef CONFIG_UNOFFICIAL_USER_DMA_API
+int temp_user_dma_op(unsigned long start, unsigned long end, int op)
+{
+ struct mm_struct *mm = current->active_mm;
+ void (*inner_op)(const void *, const void *);
+ void (*outer_op)(unsigned long, unsigned long);
+
+ if (!test_taint(TAINT_USER)) {
+ printk(KERN_WARNING "%s: using unofficial user DMA API, kernel tainted.\n",
+ current->comm);
+ add_taint(TAINT_USER);
+ }
+
+ switch (op) {
+ case 1:
+ inner_op = dmac_inv_range;
+ outer_op = outer_inv_range;
+ break;
+ case 2:
+ inner_op = dmac_clean_range;
+ outer_op = outer_clean_range;
+ break;
+ case 3:
+ inner_op = dmac_flush_range;
+ outer_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (end < start)
+ return -EINVAL;
+
+ down_read(&mm->mmap_sem);
+ do {
+ struct vm_area_struct *vma = find_vma(mm, start);
+
+ if (!vma || start < vma->vm_start ||
+ vma->vm_flags & (VM_IO | VM_PFNMAP)) {
+ up_read(&mm->mmap_sem);
+ return -EFAULT;
+ }
+
+ do {
+ unsigned long e = (start | ~PAGE_MASK) + 1;
+ struct page *page;
+
+ if (e > end)
+ e = end;
+
+ page = follow_page(vma, start, FOLL_GET);
+ if (IS_ERR(page)) {
+ up_read(&mm->mmap_sem);
+ return PTR_ERR(page);
+ }
+
+ if (page) {
+ unsigned long phys;
+
+ /*
+ * This flushes the userspace address - which
+ * is not what this API was intended to do.
+ * Things may go astray as a result.
+ */
+ inner_op((void *)start, (void *)e);
+
+ /*
+ * Now handle the L2 cache.
+ */
+ phys = page_to_phys(page) + (start & ~PAGE_MASK);
+ outer_op(phys, phys + e - start);
+
+ put_page(page);
+ }
+ start = e;
+ } while (start < end && start < vma->vm_end);
+ } while (start < end);
+ up_read(&mm->mmap_sem);
+
+ return 0;
+}
+#endif
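The op codes map to the three cache operations selected in the switch above (1 = invalidate, 2 = clean, 3 = flush). A hypothetical in-kernel caller, shown only to illustrate the calling convention; the helper name and buffer are made up, not part of this patch:

/* Sketch: clean+invalidate a user buffer range around a DMA transfer. */
static int example_user_dma_flush(unsigned long uaddr, size_t len)
{
	/* op 3 selects dmac_flush_range + outer_flush_range above */
	return temp_user_dma_op(uaddr, uaddr + len, 3);
}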
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0ab75c60f7cf..1cde59dd05f9 100644..100755
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -338,6 +338,109 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
}
EXPORT_SYMBOL(__arm_ioremap);
+#define MAX_SECTIONS 4
+void __iomem *
+__arm_multi_strided_ioremap(int sections,
+ unsigned long *phys_addr, size_t *phys_size,
+ unsigned long *phys_stride,
+ unsigned long *virt_stride,
+ unsigned int mtype)
+{
+ unsigned long pfns[MAX_SECTIONS];
+ const struct mem_type *type;
+ unsigned long total_size = 0, j;
+ int err = 0, i;
+ unsigned long addr, addr_i, pstride, vstride;
+ struct vm_struct * area;
+
+ if (sections > MAX_SECTIONS)
+ return NULL;
+
+ for (i = 0; i < sections; i++) {
+ /* the physical and virtual strides must either both be specified
+ or both be omitted */
+ pstride = ((phys_stride && phys_stride[i]) ?
+ phys_stride[i] : phys_size[i]);
+ vstride = ((virt_stride && virt_stride[i]) ?
+ virt_stride[i] : phys_size[i]);
+
+ if (!pstride ^ !vstride)
+ return NULL;
+
+ /*
+ * Don't allow wraparound or zero size. Sections must begin and
+ * end on a page boundary, and strides must be page aligned.
+ *
+ * For now, the size must be a multiple of the physical stride.
+ * This may later be relaxed to require only whole virtual strides
+ * (e.g. the waste after the last virtual block need not be
+ * included).
+ */
+ if (((phys_addr[i] | phys_size[i] |
+ vstride | pstride) & ~PAGE_MASK) ||
+ !phys_size[i] ||
+ vstride > pstride ||
+ (pstride && (phys_size[i] % pstride)) ||
+ (phys_addr[i] + phys_size[i] - 1 < phys_addr[i]))
+ return NULL;
+
+ pfns[i] = __phys_to_pfn(phys_addr[i]);
+
+ /*
+ * High mappings must be supersection aligned
+ */
+ if (pfns[i] >= 0x100000 &&
+ (__pfn_to_phys(pfns[i]) & ~SUPERSECTION_MASK))
+ return NULL;
+
+ total_size += phys_size[i] / pstride * vstride;
+ }
+
+ type = get_mem_type(mtype);
+ if (!type)
+ return NULL;
+
+ area = get_vm_area(total_size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ addr = addr_i = (unsigned long)area->addr;
+
+ for (i = 0; i < sections && !err; i++) {
+ printk(KERN_DEBUG "mapping %lx to %lx (%zx)\n", __pfn_to_phys(pfns[i]), addr_i, phys_size[i]);
+ pstride = ((phys_stride && phys_stride[i]) ?
+ phys_stride[i] : phys_size[i]);
+ vstride = ((virt_stride && virt_stride[i]) ?
+ virt_stride[i] : phys_size[i]);
+ for (j = 0; j < phys_size[i]; j += pstride) {
+ #ifndef CONFIG_SMP
+ if (DOMAIN_IO == 0 &&
+ (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
+ cpu_is_xsc3()) && pfns[i] >= 0x100000 &&
+ !((__pfn_to_phys(pfns[i]) | vstride | addr_i) & ~SUPERSECTION_MASK)) {
+ area->flags |= VM_ARM_SECTION_MAPPING;
+ err = remap_area_supersections(addr_i, pfns[i], phys_size[i], type);
+ } else if (!((__pfn_to_phys(pfns[i]) | vstride | addr_i) & ~PMD_MASK)) {
+ area->flags |= VM_ARM_SECTION_MAPPING;
+ err = remap_area_sections(addr_i, pfns[i], vstride, type);
+ } else
+ #endif
+ err = remap_area_pages(addr_i, pfns[i], vstride, type);
+ pfns[i] += __phys_to_pfn(pstride);
+ addr_i += vstride;
+ }
+ }
+
+ if (err) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+
+ flush_cache_vmap(addr, addr + total_size);
+ return (void __iomem *) addr;
+}
+EXPORT_SYMBOL(__arm_multi_strided_ioremap);
+
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
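A hypothetical caller of __arm_multi_strided_ioremap, shown only to illustrate the parameter rules enforced above (page-aligned addresses, sizes and strides; virtual stride no larger than the physical stride; size a multiple of the physical stride). The physical addresses are made up and the usual kernel headers are assumed:

/*
 * Sketch: map the first 8 KiB of every 16 KiB physical stride of two
 * 1 MiB regions into one contiguous virtual window.
 */
static void __iomem *example_strided_map(void)
{
	unsigned long phys[2]    = { 0x80000000, 0x90000000 };
	size_t        size[2]    = { SZ_1M, SZ_1M };
	unsigned long pstride[2] = { SZ_16K, SZ_16K };
	unsigned long vstride[2] = { SZ_8K, SZ_8K };

	/* total window: 2 * (SZ_1M / SZ_16K) * SZ_8K = 1 MiB */
	return __arm_multi_strided_ioremap(2, phys, size, pstride, vstride,
					   MT_DEVICE);
}

Passing NULL for both stride arrays (or zero strides) makes each section map contiguously, since the strides then default to the section size.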
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 1708da82da96..c065fea3f7d1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -420,6 +420,10 @@ static void __init build_mem_type_table(void)
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
vecs_pgprot |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 72507c630ceb..c85f5eb42634 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -265,7 +265,7 @@ ENTRY(arm1020_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm1020_dma_inv_range:
+ENTRY(arm1020_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -295,7 +295,7 @@ arm1020_dma_inv_range:
*
* (same as v4wb)
*/
-arm1020_dma_clean_range:
+ENTRY(arm1020_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -363,6 +363,8 @@ ENTRY(arm1020_cache_fns)
.long arm1020_flush_kern_dcache_area
.long arm1020_dma_map_area
.long arm1020_dma_unmap_area
+ .long arm1020_dma_inv_range
+ .long arm1020_dma_clean_range
.long arm1020_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index d27829805609..5a3cf7620a2c 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -258,7 +258,7 @@ ENTRY(arm1020e_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm1020e_dma_inv_range:
+ENTRY(arm1020e_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -284,7 +284,7 @@ arm1020e_dma_inv_range:
*
* (same as v4wb)
*/
-arm1020e_dma_clean_range:
+ENTRY(arm1020e_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -349,6 +349,8 @@ ENTRY(arm1020e_cache_fns)
.long arm1020e_flush_kern_dcache_area
.long arm1020e_dma_map_area
.long arm1020e_dma_unmap_area
+ .long arm1020e_dma_inv_range
+ .long arm1020e_dma_clean_range
.long arm1020e_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index ce13e4a827de..fec8f5878438 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -247,7 +247,7 @@ ENTRY(arm1022_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm1022_dma_inv_range:
+ENTRY(arm1022_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -273,7 +273,7 @@ arm1022_dma_inv_range:
*
* (same as v4wb)
*/
-arm1022_dma_clean_range:
+ENTRY(arm1022_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -338,6 +338,8 @@ ENTRY(arm1022_cache_fns)
.long arm1022_flush_kern_dcache_area
.long arm1022_dma_map_area
.long arm1022_dma_unmap_area
+ .long arm1022_dma_inv_range
+ .long arm1022_dma_clean_range
.long arm1022_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 636672a29c6d..9ece6f666497 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -241,7 +241,7 @@ ENTRY(arm1026_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm1026_dma_inv_range:
+ENTRY(arm1026_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -267,7 +267,7 @@ arm1026_dma_inv_range:
*
* (same as v4wb)
*/
-arm1026_dma_clean_range:
+ENTRY(arm1026_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -332,6 +332,8 @@ ENTRY(arm1026_cache_fns)
.long arm1026_flush_kern_dcache_area
.long arm1026_dma_map_area
.long arm1026_dma_unmap_area
+ .long arm1026_dma_inv_range
+ .long arm1026_dma_clean_range
.long arm1026_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 8be81992645d..6f6ab2747da6 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -239,7 +239,7 @@ ENTRY(arm920_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm920_dma_inv_range:
+ENTRY(arm920_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -262,7 +262,7 @@ arm920_dma_inv_range:
*
* (same as v4wb)
*/
-arm920_dma_clean_range:
+ENTRY(arm920_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -321,6 +321,8 @@ ENTRY(arm920_cache_fns)
.long arm920_flush_kern_dcache_area
.long arm920_dma_map_area
.long arm920_dma_unmap_area
+ .long arm920_dma_inv_range
+ .long arm920_dma_clean_range
.long arm920_dma_flush_range
#endif
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index c0ff8e4b1074..4e4396b121ca 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -241,7 +241,7 @@ ENTRY(arm922_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm922_dma_inv_range:
+ENTRY(arm922_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -264,7 +264,7 @@ arm922_dma_inv_range:
*
* (same as v4wb)
*/
-arm922_dma_clean_range:
+ENTRY(arm922_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -323,6 +323,8 @@ ENTRY(arm922_cache_fns)
.long arm922_flush_kern_dcache_area
.long arm922_dma_map_area
.long arm922_dma_unmap_area
+ .long arm922_dma_inv_range
+ .long arm922_dma_clean_range
.long arm922_dma_flush_range
#endif
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 3c6cffe400f6..7c01c5d1108c 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -283,7 +283,7 @@ ENTRY(arm925_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm925_dma_inv_range:
+ENTRY(arm925_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -308,7 +308,7 @@ arm925_dma_inv_range:
*
* (same as v4wb)
*/
-arm925_dma_clean_range:
+ENTRY(arm925_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -374,6 +374,8 @@ ENTRY(arm925_cache_fns)
.long arm925_flush_kern_dcache_area
.long arm925_dma_map_area
.long arm925_dma_unmap_area
+ .long arm925_dma_inv_range
+ .long arm925_dma_clean_range
.long arm925_dma_flush_range
ENTRY(cpu_arm925_dcache_clean_area)
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 75b707c9cce1..72a01a4b80ab 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -246,7 +246,7 @@ ENTRY(arm926_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm926_dma_inv_range:
+ENTRY(arm926_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -271,7 +271,7 @@ arm926_dma_inv_range:
*
* (same as v4wb)
*/
-arm926_dma_clean_range:
+ENTRY(arm926_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -337,6 +337,8 @@ ENTRY(arm926_cache_fns)
.long arm926_flush_kern_dcache_area
.long arm926_dma_map_area
.long arm926_dma_unmap_area
+ .long arm926_dma_inv_range
+ .long arm926_dma_clean_range
.long arm926_dma_flush_range
ENTRY(cpu_arm926_dcache_clean_area)
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 1af1657819eb..6bb58fca7270 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -171,7 +171,7 @@ ENTRY(arm940_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-arm940_dma_inv_range:
+ENTRY(arm940_dma_inv_range)
mov ip, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -192,7 +192,7 @@ arm940_dma_inv_range:
* - start - virtual start address
* - end - virtual end address
*/
-arm940_dma_clean_range:
+ENTRY(arm940_dma_clean_range)
ENTRY(cpu_arm940_dcache_clean_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -266,6 +266,8 @@ ENTRY(arm940_cache_fns)
.long arm940_flush_kern_dcache_area
.long arm940_dma_map_area
.long arm940_dma_unmap_area
+ .long arm940_dma_inv_range
+ .long arm940_dma_clean_range
.long arm940_dma_flush_range
__INIT
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 1664b6aaff79..ac0f9ba719d7 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -215,7 +215,7 @@ ENTRY(arm946_flush_kern_dcache_area)
* - end - virtual end address
* (same as arm926)
*/
-arm946_dma_inv_range:
+ENTRY(arm946_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -240,7 +240,7 @@ arm946_dma_inv_range:
*
* (same as arm926)
*/
-arm946_dma_clean_range:
+ENTRY(arm946_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -308,6 +308,8 @@ ENTRY(arm946_cache_fns)
.long arm946_flush_kern_dcache_area
.long arm946_dma_map_area
.long arm946_dma_unmap_area
+ .long arm946_dma_inv_range
+ .long arm946_dma_clean_range
.long arm946_dma_flush_range
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 53e632343849..97e1d784f152 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -274,7 +274,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area)
* (same as v4wb)
*/
.align 5
-feroceon_dma_inv_range:
+ENTRY(feroceon_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -288,7 +288,7 @@ feroceon_dma_inv_range:
mov pc, lr
.align 5
-feroceon_range_dma_inv_range:
+ENTRY(feroceon_range_dma_inv_range)
mrs r2, cpsr
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -314,7 +314,7 @@ feroceon_range_dma_inv_range:
* (same as v4wb)
*/
.align 5
-feroceon_dma_clean_range:
+ENTRY(feroceon_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -324,7 +324,7 @@ feroceon_dma_clean_range:
mov pc, lr
.align 5
-feroceon_range_dma_clean_range:
+ENTRY(feroceon_range_dma_clean_range)
mrs r2, cpsr
cmp r1, r0
subne r1, r1, #1 @ top address is inclusive
@@ -414,6 +414,8 @@ ENTRY(feroceon_cache_fns)
.long feroceon_flush_kern_dcache_area
.long feroceon_dma_map_area
.long feroceon_dma_unmap_area
+ .long feroceon_dma_inv_range
+ .long feroceon_dma_clean_range
.long feroceon_dma_flush_range
ENTRY(feroceon_range_cache_fns)
@@ -425,6 +427,8 @@ ENTRY(feroceon_range_cache_fns)
.long feroceon_range_flush_kern_dcache_area
.long feroceon_range_dma_map_area
.long feroceon_dma_unmap_area
+ .long feroceon_range_dma_inv_range
+ .long feroceon_range_dma_clean_range
.long feroceon_range_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index caa31154e7db..55b7fbec6548 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -218,7 +218,7 @@ ENTRY(mohawk_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-mohawk_dma_inv_range:
+ENTRY(mohawk_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
@@ -241,7 +241,7 @@ mohawk_dma_inv_range:
*
* (same as v4wb)
*/
-mohawk_dma_clean_range:
+ENTRY(mohawk_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -301,6 +301,8 @@ ENTRY(mohawk_cache_fns)
.long mohawk_flush_kern_dcache_area
.long mohawk_dma_map_area
.long mohawk_dma_unmap_area
+ .long mohawk_dma_inv_range
+ .long mohawk_dma_clean_range
.long mohawk_dma_flush_range
ENTRY(cpu_mohawk_dcache_clean_area)
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 046b3d88955e..4e4ce889b3e6 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -257,7 +257,7 @@ ENTRY(xsc3_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-xsc3_dma_inv_range:
+ENTRY(xsc3_dma_inv_range)
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line
@@ -278,7 +278,7 @@ xsc3_dma_inv_range:
* - start - virtual start address
* - end - virtual end address
*/
-xsc3_dma_clean_range:
+ENTRY(xsc3_dma_clean_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
add r0, r0, #CACHELINESIZE
@@ -337,6 +337,8 @@ ENTRY(xsc3_cache_fns)
.long xsc3_flush_kern_dcache_area
.long xsc3_dma_map_area
.long xsc3_dma_unmap_area
+ .long xsc3_dma_inv_range
+ .long xsc3_dma_clean_range
.long xsc3_dma_flush_range
ENTRY(cpu_xsc3_dcache_clean_area)
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 63037e2162f2..a7999f94bf27 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -315,7 +315,7 @@ ENTRY(xscale_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-xscale_dma_inv_range:
+ENTRY(xscale_dma_inv_range)
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -336,7 +336,7 @@ xscale_dma_inv_range:
* - start - virtual start address
* - end - virtual end address
*/
-xscale_dma_clean_range:
+ENTRY(xscale_dma_clean_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
@@ -409,6 +409,8 @@ ENTRY(xscale_cache_fns)
.long xscale_flush_kern_dcache_area
.long xscale_dma_map_area
.long xscale_dma_unmap_area
+ .long xscale_dma_inv_range
+ .long xscale_dma_clean_range
.long xscale_dma_flush_range
/*
@@ -434,6 +436,8 @@ ENTRY(xscale_80200_A0_A1_cache_fns)
.long xscale_dma_a0_map_area
.long xscale_dma_unmap_area
+ .long xscale_dma_inv_range
+ .long xscale_dma_clean_range
.long xscale_dma_flush_range
ENTRY(cpu_xscale_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index a26a605b73bd..e420d6611dcd 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -51,7 +51,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
cmp r0, r1
blo 1b
mov ip, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
@@ -80,7 +84,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
cmp r0, r1
blo 1b
mov r2, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
isb
mov pc, lr
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index a2d9860242d3..0c207db661ba 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -4,7 +4,7 @@
# Common support
obj-y := common.o sram.o clock.o devices.o dma.o mux.o \
- usb.o fb.o io.o
+ usb.o fb.o io.o hdmi_lib.o
obj-m :=
obj-n :=
obj- :=
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index e3f02af155dc..4fc2e1366aea 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -404,6 +404,63 @@ static void omap_init_wdt(void)
static inline void omap_init_wdt(void) {}
#endif
+/*---------------------------------------------------------------------------*/
+#ifdef CONFIG_ARCH_OMAP4
+#define NUM_VOUT_RESOURCES 4
+#else
+#define NUM_VOUT_RESOURCES 3
+#endif
+
+#if defined(CONFIG_VIDEO_OMAP3_OUT) || \
+ defined(CONFIG_VIDEO_OMAP3_OUT_MODULE)
+#ifdef CONFIG_ARCH_OMAP4
+#ifdef CONFIG_FB_OMAP2
+static struct resource
+ sdp4430_vout_resource[NUM_VOUT_RESOURCES - CONFIG_FB_OMAP2_NUM_FBS] = {
+ };
+#else
+static struct resource sdp4430_vout_resource[NUM_VOUT_RESOURCES - 1] = {
+};
+#endif
+
+static struct platform_device sdp4430_vout_device = {
+ .name = "omap_vout",
+ .num_resources = ARRAY_SIZE(sdp4430_vout_resource),
+ .resource = &sdp4430_vout_resource[0],
+ .id = -1,
+};
+
+static void omap_init_vout(void)
+{
+ (void) platform_device_register(&sdp4430_vout_device);
+}
+
+#else /* CONFIG_ARCH_OMAP4 */
+#ifdef CONFIG_FB_OMAP2
+static struct resource omap3evm_vout_resource[3 - CONFIG_FB_OMAP2_NUM_FBS] = {
+};
+#else
+static struct resource omap3evm_vout_resource[2] = {
+};
+#endif
+
+static struct platform_device omap3evm_vout_device = {
+ .name = "omap_vout",
+ .num_resources = ARRAY_SIZE(omap3evm_vout_resource),
+ .resource = &omap3evm_vout_resource[0],
+ .id = -1,
+};
+static void omap_init_vout(void)
+{
+ (void) platform_device_register(&omap3evm_vout_device);
+}
+#endif /* CONFIG_ARCH_OMAP4 */
+#else
+static inline void omap_init_vout(void) {}
+#endif
+#undef NUM_VOUT_RESOURCES
+/*---------------------------------------------------------------------------*/
+
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
* on-chip peripherals accessible on this board (except for few like USB):
@@ -434,6 +491,7 @@ static int __init omap_init_devices(void)
omap_init_rng();
omap_init_mcpdm();
omap_init_uwire();
+ omap_init_vout();
omap_init_wdt();
return 0;
}
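The vout platform devices registered above are bound by platform-bus name matching, presumably against the V4L2 omap_vout driver selected by CONFIG_VIDEO_OMAP3_OUT. The fragment below is only a hypothetical sketch of that binding convention; all names other than "omap_vout" are made up:

static int example_vout_probe(struct platform_device *pdev)
{
	/* reached because .driver.name matches the "omap_vout" device name */
	return 0;
}

static struct platform_driver example_vout_driver = {
	.probe	= example_vout_probe,
	.driver	= {
		.name = "omap_vout",
	},
};

The driver side would register this with platform_driver_register(&example_vout_driver) from its module init.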
diff --git a/arch/arm/plat-omap/hdmi_lib.c b/arch/arm/plat-omap/hdmi_lib.c
new file mode 100755
index 000000000000..25d1fc508a6e
--- /dev/null
+++ b/arch/arm/plat-omap/hdmi_lib.c
@@ -0,0 +1,1299 @@
+/*
+ * hdmi_lib.c
+ *
+ * HDMI library support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Yong Zhi <y-zhi@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* Rev history */
+/* changed SiVal macros */
+/* added PLL/PHY code */
+/* added EDID code */
+/* moved PLL/PHY code to hdmi panel driver */
+/* cleanup 2/08/10 */
+
+#define DSS_SUBSYS_NAME "HDMI"
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <plat/hdmi_lib.h>
+#include <linux/delay.h>
+
+/* HDMI PHY */
+#define HDMI_TXPHY_TX_CTRL 0x0ul
+#define HDMI_TXPHY_DIGITAL_CTRL 0x4ul
+#define HDMI_TXPHY_POWER_CTRL 0x8ul
+
+/* HDMI Wrapper */
+#define HDMI_WP_REVISION 0x0ul
+#define HDMI_WP_SYSCONFIG 0x10ul
+#define HDMI_WP_IRQSTATUS_RAW 0x24ul
+#define HDMI_WP_IRQSTATUS 0x28ul
+#define HDMI_WP_PWR_CTRL 0x40ul
+#define HDMI_WP_IRQENABLE_SET 0x2Cul
+#define HDMI_WP_VIDEO_CFG 0x50ul
+#define HDMI_WP_VIDEO_SIZE 0x60ul
+#define HDMI_WP_VIDEO_TIMING_H 0x68ul
+#define HDMI_WP_VIDEO_TIMING_V 0x6Cul
+#define HDMI_WP_WP_CLK 0x70ul
+
+/* HDMI IP Core System */
+#define HDMI_CORE_SYS__VND_IDL 0x0ul
+#define HDMI_CORE_SYS__DEV_IDL 0x8ul
+#define HDMI_CORE_SYS__DEV_IDH 0xCul
+#define HDMI_CORE_SYS__DEV_REV 0x10ul
+#define HDMI_CORE_SYS__SRST 0x14ul
+#define HDMI_CORE_CTRL1 0x20ul
+#define HDMI_CORE_SYS__VID_ACEN 0x124ul
+#define HDMI_CORE_SYS__VID_MODE 0x128ul
+#define HDMI_CORE_SYS__INTR1 0x1C4ul
+#define HDMI_CORE_SYS__INTR2 0x1C8ul
+#define HDMI_CORE_SYS__INTR3 0x1CCul
+#define HDMI_CORE_SYS__INTR4 0x1D0ul
+#define HDMI_CORE_SYS__TMDS_CTRL 0x208ul
+#define HDMI_CORE_CTRL1_VEN__FOLLOWVSYNC 0x1ul
+#define HDMI_CORE_CTRL1_HEN__FOLLOWHSYNC 0x1ul
+#define HDMI_CORE_CTRL1_BSEL__24BITBUS 0x1ul
+#define HDMI_CORE_CTRL1_EDGE__RISINGEDGE 0x1ul
+
+#define HDMI_CORE_SYS__DE_DLY 0xC8ul
+#define HDMI_CORE_SYS__DE_CTRL 0xCCul
+#define HDMI_CORE_SYS__DE_TOP 0xD0ul
+#define HDMI_CORE_SYS__DE_CNTL 0xD8ul
+#define HDMI_CORE_SYS__DE_CNTH 0xDCul
+#define HDMI_CORE_SYS__DE_LINL 0xE0ul
+#define HDMI_CORE_SYS__DE_LINH__1 0xE4ul
+
+/* HDMI IP Core Audio Video */
+#define HDMI_CORE_AV_HDMI_CTRL 0xBCul
+#define HDMI_CORE_AV_DPD 0xF4ul
+#define HDMI_CORE_AV_PB_CTRL1 0xF8ul
+#define HDMI_CORE_AV_PB_CTRL2 0xFCul
+#define HDMI_CORE_AV_AVI_TYPE 0x100ul
+#define HDMI_CORE_AV_AVI_VERS 0x104ul
+#define HDMI_CORE_AV_AVI_LEN 0x108ul
+#define HDMI_CORE_AV_AVI_CHSUM 0x10Cul
+#define HDMI_CORE_AV_AVI_DBYTE 0x110ul
+#define HDMI_CORE_AV_AVI_DBYTE__ELSIZE 0x4ul
+
+/* HDMI DDC E-DID */
+#define HDMI_CORE_DDC_CMD 0x3CCul
+#define HDMI_CORE_DDC_STATUS 0x3C8ul
+#define HDMI_CORE_DDC_ADDR 0x3B4ul
+#define HDMI_CORE_DDC_OFFSET 0x3BCul
+#define HDMI_CORE_DDC_COUNT1 0x3C0ul
+#define HDMI_CORE_DDC_COUNT2 0x3C4ul
+#define HDMI_CORE_DDC_DATA 0x3D0ul
+#define HDMI_CORE_DDC_SEGM 0x3B8ul
+
+#define HDMI_WP_AUDIO_CFG 0x80ul
+#define HDMI_WP_AUDIO_CFG2 0x84ul
+#define HDMI_WP_AUDIO_CTRL 0x88ul
+#define HDMI_WP_AUDIO_DATA 0x8Cul
+
+#define HDMI_CORE_AV__AVI_DBYTE 0x110ul
+#define HDMI_CORE_AV__AVI_DBYTE__ELSIZE 0x4ul
+#define HDMI_IP_CORE_AV__AVI_DBYTE__NELEMS 15
+#define HDMI_CORE_AV__SPD_DBYTE 0x190ul
+#define HDMI_CORE_AV__SPD_DBYTE__ELSIZE 0x4ul
+#define HDMI_CORE_AV__SPD_DBYTE__NELEMS 27
+#define HDMI_CORE_AV__AUDIO_DBYTE 0x210ul
+#define HDMI_CORE_AV__AUDIO_DBYTE__ELSIZE 0x4ul
+#define HDMI_CORE_AV__AUDIO_DBYTE__NELEMS 10
+#define HDMI_CORE_AV__MPEG_DBYTE 0x290ul
+#define HDMI_CORE_AV__MPEG_DBYTE__ELSIZE 0x4ul
+#define HDMI_CORE_AV__MPEG_DBYTE__NELEMS 27
+#define HDMI_CORE_AV__GEN_DBYTE 0x300ul
+#define HDMI_CORE_AV__GEN_DBYTE__ELSIZE 0x4ul
+#define HDMI_CORE_AV__GEN_DBYTE__NELEMS 31
+#define HDMI_CORE_AV__GEN2_DBYTE 0x380ul
+#define HDMI_CORE_AV__GEN2_DBYTE__ELSIZE 0x4ul
+#define HDMI_CORE_AV__GEN2_DBYTE__NELEMS 31
+#define HDMI_CORE_AV__ACR_CTRL 0x4ul
+#define HDMI_CORE_AV__FREQ_SVAL 0x8ul
+#define HDMI_CORE_AV__N_SVAL1 0xCul
+#define HDMI_CORE_AV__N_SVAL2 0x10ul
+#define HDMI_CORE_AV__N_SVAL3 0x14ul
+#define HDMI_CORE_AV__CTS_SVAL1 0x18ul
+#define HDMI_CORE_AV__CTS_SVAL2 0x1Cul
+#define HDMI_CORE_AV__CTS_SVAL3 0x20ul
+#define HDMI_CORE_AV__CTS_HVAL1 0x24ul
+#define HDMI_CORE_AV__CTS_HVAL2 0x28ul
+#define HDMI_CORE_AV__CTS_HVAL3 0x2Cul
+#define HDMI_CORE_AV__AUD_MODE 0x50ul
+#define HDMI_CORE_AV__SPDIF_CTRL 0x54ul
+#define HDMI_CORE_AV__HW_SPDIF_FS 0x60ul
+#define HDMI_CORE_AV__SWAP_I2S 0x64ul
+#define HDMI_CORE_AV__SPDIF_ERTH 0x6Cul
+#define HDMI_CORE_AV__I2S_IN_MAP 0x70ul
+#define HDMI_CORE_AV__I2S_IN_CTRL 0x74ul
+#define HDMI_CORE_AV__I2S_CHST0 0x78ul
+#define HDMI_CORE_AV__I2S_CHST1 0x7Cul
+#define HDMI_CORE_AV__I2S_CHST2 0x80ul
+#define HDMI_CORE_AV__I2S_CHST4 0x84ul
+#define HDMI_CORE_AV__I2S_CHST5 0x88ul
+#define HDMI_CORE_AV__ASRC 0x8Cul
+#define HDMI_CORE_AV__I2S_IN_LEN 0x90ul
+#define HDMI_CORE_AV__HDMI_CTRL 0xBCul
+#define HDMI_CORE_AV__AUDO_TXSTAT 0xC0ul
+#define HDMI_CORE_AV__AUD_PAR_BUSCLK_1 0xCCul
+#define HDMI_CORE_AV__AUD_PAR_BUSCLK_2 0xD0ul
+#define HDMI_CORE_AV__AUD_PAR_BUSCLK_3 0xD4ul
+#define HDMI_CORE_AV__TEST_TXCTRL 0xF0ul
+#define HDMI_CORE_AV__DPD 0xF4ul
+#define HDMI_CORE_AV__PB_CTRL1 0xF8ul
+#define HDMI_CORE_AV__PB_CTRL2 0xFCul
+#define HDMI_CORE_AV__AVI_TYPE 0x100ul
+#define HDMI_CORE_AV__AVI_VERS 0x104ul
+#define HDMI_CORE_AV__AVI_LEN 0x108ul
+#define HDMI_CORE_AV__AVI_CHSUM 0x10Cul
+#define HDMI_CORE_AV__SPD_TYPE 0x180ul
+#define HDMI_CORE_AV__SPD_VERS 0x184ul
+#define HDMI_CORE_AV__SPD_LEN 0x188ul
+#define HDMI_CORE_AV__SPD_CHSUM 0x18Cul
+#define HDMI_CORE_AV__AUDIO_TYPE 0x200ul
+#define HDMI_CORE_AV__AUDIO_VERS 0x204ul
+#define HDMI_CORE_AV__AUDIO_LEN 0x208ul
+#define HDMI_CORE_AV__AUDIO_CHSUM 0x20Cul
+#define HDMI_CORE_AV__MPEG_TYPE 0x280ul
+#define HDMI_CORE_AV__MPEG_VERS 0x284ul
+#define HDMI_CORE_AV__MPEG_LEN 0x288ul
+#define HDMI_CORE_AV__MPEG_CHSUM 0x28Cul
+#define HDMI_CORE_AV__CP_BYTE1 0x37Cul
+#define HDMI_CORE_AV__CEC_ADDR_ID 0x3FCul
+
+
+static struct {
+ void __iomem *base_core; /*0*/
+ void __iomem *base_core_av; /*1*/
+ void __iomem *base_wp; /*2*/
+ struct mutex hdmi_lock;
+} hdmi;
+
+static inline void hdmi_write_reg(u32 base, u16 idx, u32 val)
+{
+ void __iomem *b;
+
+ switch (base) {
+ case HDMI_CORE_SYS:
+ b = hdmi.base_core;
+ break;
+ case HDMI_CORE_AV:
+ b = hdmi.base_core_av;
+ break;
+ case HDMI_WP:
+ b = hdmi.base_wp;
+ break;
+ default:
+ BUG();
+ }
+ __raw_writel(val, b + idx);
+ /* DBG("write = 0x%x idx =0x%x\r\n", val, idx); */
+}
+
+static inline u32 hdmi_read_reg(u32 base, u16 idx)
+{
+ void __iomem *b;
+ u32 l;
+
+ switch (base) {
+ case HDMI_CORE_SYS:
+ b = hdmi.base_core;
+ break;
+ case HDMI_CORE_AV:
+ b = hdmi.base_core_av;
+ break;
+ case HDMI_WP:
+ b = hdmi.base_wp;
+ break;
+ default:
+ BUG();
+ }
+ l = __raw_readl(b + idx);
+
+ /* DBG("addr = 0x%p rd = 0x%x idx = 0x%x\r\n", (b+idx), l, idx); */
+ return l;
+}
+
+#define FLD_MASK(start, end) (((1 << (start - end + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << end) & FLD_MASK(start, end))
+#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
+#define FLD_MOD(orig, val, start, end) \
+ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+
+#define REG_FLD_MOD(base, idx, val, start, end) \
+ hdmi_write_reg(base, idx, FLD_MOD(hdmi_read_reg(base, idx), val, start, end))
+
+#define RD_REG_32(COMP, REG) hdmi_read_reg(COMP, REG)
+#define WR_REG_32(COMP, REG, VAL) hdmi_write_reg(COMP, REG, (u32)(VAL))
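+/*
+ * Worked example: FLD_MASK(7, 6) = ((1 << 2) - 1) << 6 = 0xC0, so
+ * FLD_MOD(0x15, 0x3, 7, 6) = (0x15 & ~0xC0) | ((0x3 << 6) & 0xC0) = 0xD5:
+ * bits 7..6 take the new value while all other bits are preserved.
+ */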
+
+int hdmi_core_ddc_edid(u8 *pEDID)
+{
+ u32 i, j, l;
+ char checksum = 0;
+ u32 sts = HDMI_CORE_DDC_STATUS;
+ u32 ins = HDMI_CORE_SYS;
+
+ /* Turn on CLK for DDC */
+ REG_FLD_MOD(HDMI_CORE_AV, HDMI_CORE_AV_DPD, 0x7, 2, 0);
+
+ /* Wait */
+ mdelay(10);
+
+ /* Clk SCL Devices */
+ REG_FLD_MOD(ins, HDMI_CORE_DDC_CMD, 0xA, 3, 0);
+
+ /* busy-wait until HDMI_CORE_DDC_STATUS__IN_PROG clears */
+ while (FLD_GET(hdmi_read_reg(ins, sts), 4, 4) == 1)
+ ;
+
+ /* Clear FIFO */
+ REG_FLD_MOD(ins, HDMI_CORE_DDC_CMD, 0x9, 3, 0);
+
+ /* busy-wait until HDMI_CORE_DDC_STATUS__IN_PROG clears */
+ while (FLD_GET(hdmi_read_reg(ins, sts), 4, 4) == 1)
+ ;
+
+ /* Load Slave Address Register */
+ REG_FLD_MOD(ins, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
+
+ /* Load Offset Address Register */
+ REG_FLD_MOD(ins, HDMI_CORE_DDC_OFFSET, 0x0, 7, 0);
+ /* Load Byte Count */
+ REG_FLD_MOD(ins, HDMI_CORE_DDC_COUNT1, 0x100, 7, 0);
+ REG_FLD_MOD(ins, HDMI_CORE_DDC_COUNT2, 0x100>>8, 1, 0);
+ /* Set DDC_CMD */
+ REG_FLD_MOD(ins, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
+
+ /* Yong: do not optimize this part of the code; it seems the
+ DDC bus needs some time to stabilize
+ */
+ l = hdmi_read_reg(ins, sts);
+
+ /* HDMI_CORE_DDC_STATUS__BUS_LOW */
+ if (FLD_GET(l, 6, 6) == 1) {
+ printk("I2C Bus Low?\n");
+ return -1;
+ }
+ /* HDMI_CORE_DDC_STATUS__NO_ACK */
+ if (FLD_GET(l, 5, 5) == 1) {
+ printk("I2C No Ack\n");
+ return -1;
+ }
+
+ j = 100;
+ while (j--) {
+ l = hdmi_read_reg(ins, sts);
+ /* progress */
+ if (FLD_GET(l, 4, 4) == 1) {
+ /* HACK: Load Slave Address Register again */
+ REG_FLD_MOD(ins, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
+ REG_FLD_MOD(ins, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
+ break;
+ }
+ mdelay(20);
+ }
+
+ i = 0;
+ while (FLD_GET(hdmi_read_reg(ins, sts), 4, 4) == 1 ||
+ FLD_GET(hdmi_read_reg(ins, sts), 2, 2) == 0) {
+ if (FLD_GET(hdmi_read_reg(ins,
+ sts), 2, 2) == 0) {
+ /* FIFO not empty */
+ pEDID[i++] = FLD_GET(hdmi_read_reg(ins, HDMI_CORE_DDC_DATA), 7, 0);
+ }
+ }
+
+ if (pEDID[0x14] == 0x80) {/* Digital Display */
+ if (pEDID[0x7e] == 0x00) {/* No Extension Block */
+ for (j = 0; j < 128; j++)
+ checksum += pEDID[j];
+ DBG("No extension 128 bit checksum\n");
+ } else {
+ for (j = 0; j < 256; j++)
+ checksum += pEDID[j];
+ DBG("Extension present 256 bit checksum\n");
+ /* HDMI_CORE_DDC_READ_EXTBLOCK(); */
+ }
+ } else {
+ DBG("Analog Display\n");
+ }
+
+ DBG("EDID Content %d\n", i);
+
+#ifdef DEBUG_EDID
+ DBG("Header:\n");
+ for (i = 0x00; i < 0x08; i++)
+ DBG("%02x\n", pEDID[i]);
+ DBG("Vendor & Product:\n");
+ for (i = 0x08; i < 0x12; i++)
+ DBG("%02x\n", pEDID[i]);
+ DBG("EDID Structure:\n");
+ for (i = 0x12; i < 0x14; i++)
+ DBG("%02x\n", pEDID[i]);
+ DBG("Basic Display Parameter:\n");
+ for (i = 0x14; i < 0x19; i++)
+ DBG("%02x\n", pEDID[i]);
+ DBG("Color Characteristics:\n");
+ for (i = 0x19; i < 0x23; i++)
+ DBG("%02x\n", pEDID[i]);
+ DBG("Established timings:\n");
+ for (i = 0x23; i < 0x26; i++)
+ DBG("%02x\n", pEDID[i]);
+ DBG("Standard timings:\n");
+ for (i = 0x26; i < 0x36; i++)
+ DBG("%02x\n", pEDID[i]);
+ DBG("Detailed timing1:\n");
+ for (i = 0x36; i < 0x48; i++)
+ DBG("%02x\n", pEDID[i]);
+ DBG("Detailed timing2:\n");
+ for (i = 0x48; i < 0x5a; i++)
+ DBG("%02x\n", pEDID[i]);
+ DBG("Detailed timing3:\n");
+ for (i = 0x5a; i < 0x6c; i++)
+ DBG("%02x\n", pEDID[i]);
+ DBG("Detailed timing4:\n");
+ for (i = 0x6c; i < 0x7e; i++)
+ DBG("%02x\n", pEDID[i]);
+#endif
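+ /*
+ * Each 128-byte EDID block is defined to sum to 0 modulo 256, so a
+ * valid E-EDID (base block plus extension) also sums to 0 over all
+ * 256 bytes; any non-zero checksum here means the read is corrupt.
+ */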
+ if (checksum != 0) {
+ printk("E-EDID checksum failed!!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static void hdmi_core_init(struct hdmi_core_video_config_t *v_cfg,
+ struct hdmi_core_audio_config *audio_cfg,
+ struct hdmi_core_infoframe_avi *avi,
+ struct hdmi_core_packet_enable_repeat *r_p)
+{
+ DBG("Enter HDMI_Core_GlobalInitVars()\n");
+
+ /*video core*/
+ v_cfg->CoreInputBusWide = HDMI_INPUT_8BIT;
+ v_cfg->CoreOutputDitherTruncation = HDMI_OUTPUTTRUNCATION_8BIT;
+ v_cfg->CoreDeepColorPacketED = HDMI_DEEPCOLORPACKECTDISABLE;
+ v_cfg->CorePacketMode = HDMI_PACKETMODERESERVEDVALUE;
+ v_cfg->CoreHdmiDvi = HDMI_DVI;
+ v_cfg->CoreTclkSelClkMult = FPLL10IDCK;
+
+ /*audio core*/
+ audio_cfg->fs = FS_44100;
+ audio_cfg->n = 0;
+ audio_cfg->cts = 0;
+ audio_cfg->layout = LAYOUT_2CH; /*2channel audio*/
+ audio_cfg->aud_par_busclk = 0;
+ audio_cfg->cts_mode = CTS_MODE_HW;
+
+ /*info frame*/
+ avi->db1y_rgb_yuv422_yuv444 = 0;
+ avi->db1a_active_format_off_on = 0;
+ avi->db1b_no_vert_hori_verthori = 0;
+ avi->db1s_0_1_2 = 0;
+ avi->db2c_no_itu601_itu709_extented = 0;
+ avi->db2m_no_43_169 = 0;
+ avi->db2r_same_43_169_149 = 0;
+ avi->db3itc_no_yes = 0;
+ avi->db3ec_xvyuv601_xvyuv709 = 0;
+ avi->db3q_default_lr_fr = 0;
+ avi->db3sc_no_hori_vert_horivert = 0;
+ avi->db4vic_videocode = 0;
+ avi->db5pr_no_2_3_4_5_6_7_8_9_10 = 0;
+ avi->db6_7_lineendoftop = 0 ;
+ avi->db8_9_linestartofbottom = 0;
+ avi->db10_11_pixelendofleft = 0;
+ avi->db12_13_pixelstartofright = 0;
+
+ /*packet enable and repeat*/
+ r_p->AudioPacketED = 0;
+ r_p->AudioPacketRepeat = 0;
+ r_p->AVIInfoFrameED = 0;
+ r_p->AVIInfoFrameRepeat = 0;
+ r_p->GeneralcontrolPacketED = 0;
+ r_p->GeneralcontrolPacketRepeat = 0;
+ r_p->GenericPacketED = 0;
+ r_p->GenericPacketRepeat = 0;
+}
+
+static void hdmi_core_powerdown_disable(void)
+{
+ DBG("Enter DSS_HDMI_CORE_POWER_DOWN_DISABLE()\n");
+ REG_FLD_MOD(HDMI_CORE_SYS, HDMI_CORE_CTRL1, 0x0, 0, 0);
+}
+
+/* todo: power off the core */
+static void hdmi_core_powerdown_enable(void)
+{
+ REG_FLD_MOD(HDMI_CORE_SYS, HDMI_CORE_CTRL1, 0x1, 0, 0);
+}
+
+static void hdmi_core_swreset_release(void)
+{
+ DBG("Enter DSS_HDMI_CORE_SW_RESET_RELEASE()\n");
+ REG_FLD_MOD(HDMI_CORE_SYS, HDMI_CORE_SYS__SRST, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_assert(void)
+{
+ DBG("Enter DSS_HDMI_CORE_SW_RESET_ASSERT ()\n");
+ REG_FLD_MOD(HDMI_CORE_SYS, HDMI_CORE_SYS__SRST, 0x1, 0, 0);
+}
+
+/* DSS_HDMI_CORE_VIDEO_CONFIG */
+static int hdmi_core_video_config(
+ struct hdmi_core_video_config_t *cfg)
+{
+ u32 name = HDMI_CORE_SYS;
+ u32 av_name = HDMI_CORE_AV;
+ u32 r = 0;
+
+ /*sys_ctrl1 default configuration not tunable*/
+ u32 ven;
+ u32 hen;
+ u32 bsel;
+ u32 edge;
+
+ /*sys_ctrl1 default configuration not tunable*/
+ ven = HDMI_CORE_CTRL1_VEN__FOLLOWVSYNC;
+ hen = HDMI_CORE_CTRL1_HEN__FOLLOWHSYNC;
+ bsel = HDMI_CORE_CTRL1_BSEL__24BITBUS;
+ edge = HDMI_CORE_CTRL1_EDGE__RISINGEDGE;
+
+ /*sys_ctrl1 default configuration not tunable*/
+ r = hdmi_read_reg(name, HDMI_CORE_CTRL1);
+ r = FLD_MOD(r, ven, 5, 5);
+ r = FLD_MOD(r, hen, 4, 4);
+ r = FLD_MOD(r, bsel, 2, 2);
+ r = FLD_MOD(r, edge, 1, 1);
+ hdmi_write_reg(name, HDMI_CORE_CTRL1, r);
+
+ REG_FLD_MOD(name, HDMI_CORE_SYS__VID_ACEN, cfg->CoreInputBusWide, 7, 6);
+
+ /*Vid_Mode */
+ r = hdmi_read_reg(name, HDMI_CORE_SYS__VID_MODE);
+ /*dither truncation configuration*/
+ if (cfg->CoreOutputDitherTruncation >
+ HDMI_OUTPUTTRUNCATION_12BIT) {
+ r = FLD_MOD(r, cfg->CoreOutputDitherTruncation - 3, 7, 6);
+ r = FLD_MOD(r, 1, 5, 5);
+ } else {
+ r = FLD_MOD(r, cfg->CoreOutputDitherTruncation, 7, 6);
+ r = FLD_MOD(r, 0, 5, 5);
+ }
+ hdmi_write_reg(name, HDMI_CORE_SYS__VID_MODE, r);
+
+ /*HDMI_Ctrl*/
+ r = hdmi_read_reg(av_name, HDMI_CORE_AV_HDMI_CTRL);
+ r = FLD_MOD(r, cfg->CoreDeepColorPacketED, 6, 6);
+ r = FLD_MOD(r, cfg->CorePacketMode, 5, 3);
+ r = FLD_MOD(r, cfg->CoreHdmiDvi, 0, 0);
+ hdmi_write_reg(av_name, HDMI_CORE_AV_HDMI_CTRL, r);
+
+ /*TMDS_CTRL*/
+ REG_FLD_MOD(name, HDMI_CORE_SYS__TMDS_CTRL,
+ cfg->CoreTclkSelClkMult, 6, 5);
+
+ return 0;
+}
+
+static int hdmi_core_audio_mode_enable(u32 instanceName)
+{
+ REG_FLD_MOD(instanceName, HDMI_CORE_AV__AUD_MODE, 1, 0, 0);
+ return 0;
+}
+
+static int hdmi_core_audio_config(u32 name,
+ struct hdmi_core_audio_config *audio_cfg)
+{
+ int ret = 0;
+ u32 SD3_EN, SD2_EN, SD1_EN, SD0_EN;
+ u8 DBYTE1, DBYTE2, DBYTE4, CHSUM;
+ u8 size0, size1;
+
+ /*CTS_MODE*/
+ WR_REG_32(name, HDMI_CORE_AV__ACR_CTRL,
+ ((0x0 << 2) | /* MCLK_EN (0: Mclk is not used)*/
+ (0x1 << 1) | /* CTS Request Enable (1:Packet Enable, 0:Disable) */
+ (audio_cfg->cts_mode << 0))); /* CTS Source Select (1:SW, 0:HW)*/
+
+ REG_FLD_MOD(name, HDMI_CORE_AV__FREQ_SVAL, 0, 2, 0);
+ REG_FLD_MOD(name, HDMI_CORE_AV__N_SVAL1, audio_cfg->n, 7, 0);
+ REG_FLD_MOD(name, HDMI_CORE_AV__N_SVAL2, (audio_cfg->n >> 8), 7, 0);
+ REG_FLD_MOD(name, HDMI_CORE_AV__N_SVAL3, (audio_cfg->n >> 16), 7, 0);
+ REG_FLD_MOD(name, HDMI_CORE_AV__CTS_SVAL1, (audio_cfg->cts), 7, 0);
+ REG_FLD_MOD(name, HDMI_CORE_AV__CTS_SVAL2, (audio_cfg->cts >> 8), 7, 0);
+ REG_FLD_MOD(name, HDMI_CORE_AV__CTS_SVAL3, (audio_cfg->cts >> 16), 7, 0);
+
+ /*number of channel*/
+ REG_FLD_MOD(name, HDMI_CORE_AV__HDMI_CTRL, audio_cfg->layout, 2, 1);
+ REG_FLD_MOD(name, HDMI_CORE_AV__AUD_PAR_BUSCLK_1,
+ audio_cfg->aud_par_busclk, 7, 0);
+ REG_FLD_MOD(name, HDMI_CORE_AV__AUD_PAR_BUSCLK_2,
+ (audio_cfg->aud_par_busclk >> 8), 7, 0);
+ REG_FLD_MOD(name, HDMI_CORE_AV__AUD_PAR_BUSCLK_3,
+ (audio_cfg->aud_par_busclk >> 16), 7, 0);
+ /* FS_OVERRIDE = 1 because the parallel (I2S) input is used */
+ WR_REG_32(name, HDMI_CORE_AV__SPDIF_CTRL, 0x1);
+ /* refer to table209 p192 in func core spec*/
+ WR_REG_32(name, HDMI_CORE_AV__I2S_CHST4, audio_cfg->fs);
+
+ /* The audio configuration is largely fixed by the wrapper hardware
+ connection. The I2S deserializer is bypassed, so no real I2S
+ configuration is needed (the I2S settings are don't-cares).
+ However, the wrapper is wired directly to the deserializer output,
+ so the registers named I2S... must still be programmed to set up
+ that parallel bus; their values are likewise fixed by the hardware
+ connection (I2S hardware).
+ */
+ WR_REG_32(name, HDMI_CORE_AV__I2S_IN_CTRL,
+ (0 << 7) | /* HBRA_ON */
+ (1 << 6) | /* SCK_EDGE Sample clock is rising */
+ (0 << 5) | /* CBIT_ORDER */
+ (0 << 4) | /* VBit, 0x0=PCM, 0x1=compressed */
+ (0 << 3) | /* I2S_WS, 0xdon't care */
+ (0 << 2) | /* I2S_JUST, 0=left-justified 1=right-justified */
+ (0 << 1) | /* I2S_DIR, 0xdon't care */
+ (0)); /* I2S_SHIFT, 0x0 don't care*/
+
+ WR_REG_32(name, HDMI_CORE_AV__I2S_CHST5, /* mode only */
+ (0 << 4) | /* FS_ORIG */
+ (1 << 1) | /* I2S length 16 bits (refer to doc) */
+ (0)); /* Audio sample length */
+
+ WR_REG_32(name, HDMI_CORE_AV__I2S_IN_LEN, /* mode only */
+ (0xb)); /* input length 0xb => 24 bits (I2S hardware) */
+
+ /*channel enable depend of the layout*/
+ if (audio_cfg->layout == LAYOUT_2CH) {
+ SD3_EN = 0x0;
+ SD2_EN = 0x0;
+ SD1_EN = 0x0;
+ SD0_EN = 0x1;
+ }
+ if (audio_cfg->layout == LAYOUT_8CH) {
+ SD3_EN = 0x1;
+ SD2_EN = 0x1;
+ SD1_EN = 0x1;
+ SD0_EN = 0x1;
+ }
+
+ WR_REG_32(name, HDMI_CORE_AV__AUD_MODE,
+ (SD3_EN << 7) | /* SD3_EN */
+ (SD2_EN << 6) | /* SD2_EN */
+ (SD1_EN << 5) | /* SD1_EN */
+ (SD0_EN << 4) | /* SD0_EN */
+ (0 << 3) | /* DSD_EN */
+ (1 << 2) | /* AUD_PAR_EN*/
+ (0 << 1) | /* SPDIF_EN*/
+ (0)); /* AUD_EN*/
+
+ /* Audio info frame setting refer to CEA-861-d spec p75 */
+ /* 0x10 because only PCM is supported; -1 because a value of 1 means 2 channels */
+ DBYTE1 = 0x10 + (audio_cfg->if_channel_number - 1);
+ DBYTE2 = (audio_cfg->if_fs << 2) + audio_cfg->if_sample_size;
+ /*channel location according to CEA spec*/
+ DBYTE4 = audio_cfg->if_audio_channel_location;
+
+ CHSUM = 0x100-0x84-0x01-0x0A-DBYTE1-DBYTE2-DBYTE4;
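+ /*
+ * CHSUM makes the infoframe header (0x84, 0x01, 0x0A) plus payload
+ * sum to 0 modulo 256. Worked example for 2-channel PCM (DBYTE1 =
+ * 0x11, DBYTE2 = DBYTE4 = 0): CHSUM = 0x100 - 0x84 - 0x01 - 0x0A -
+ * 0x11 = 0x60, and 0x84 + 0x01 + 0x0A + 0x11 + 0x60 = 0x100 = 0 mod 256.
+ */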
+
+ WR_REG_32(name, HDMI_CORE_AV__AUDIO_TYPE, 0x084);
+ WR_REG_32(name, HDMI_CORE_AV__AUDIO_VERS, 0x001);
+ WR_REG_32(name, HDMI_CORE_AV__AUDIO_LEN, 0x00A);
+ WR_REG_32(name, HDMI_CORE_AV__AUDIO_CHSUM, CHSUM); /*don't care on VMP*/
+
+ size0 = HDMI_CORE_AV__AUDIO_DBYTE;
+ size1 = HDMI_CORE_AV__AUDIO_DBYTE__ELSIZE;
+ hdmi_write_reg(name, (size0 + 0 * size1), DBYTE1);
+ hdmi_write_reg(name, (size0 + 1 * size1), DBYTE2);
+ hdmi_write_reg(name, (size0 + 2 * size1), 0x000);
+ hdmi_write_reg(name, (size0 + 3 * size1), DBYTE4);
+ hdmi_write_reg(name, (size0 + 4 * size1), 0x000);
+ hdmi_write_reg(name, (size0 + 5 * size1), 0x000);
+ hdmi_write_reg(name, (size0 + 6 * size1), 0x000);
+ hdmi_write_reg(name, (size0 + 7 * size1), 0x000);
+ hdmi_write_reg(name, (size0 + 8 * size1), 0x000);
+ hdmi_write_reg(name, (size0 + 9 * size1), 0x000);
+
+ return ret;
+}
+
+static int hdmi_core_audio_infoframe_avi(u32 name,
+ struct hdmi_core_infoframe_avi info_avi)
+{
+ u16 offset;
+ int dbyte, dbyte_size;
+ u32 val;
+
+ dbyte = HDMI_CORE_AV_AVI_DBYTE;
+ dbyte_size = HDMI_CORE_AV_AVI_DBYTE__ELSIZE;
+ /*info frame video*/
+ hdmi_write_reg(name, HDMI_CORE_AV_AVI_TYPE, 0x082);
+ hdmi_write_reg(name, HDMI_CORE_AV_AVI_VERS, 0x002);
+ hdmi_write_reg(name, HDMI_CORE_AV_AVI_LEN, 0x00D);
+
+ offset = dbyte + (0 * dbyte_size);
+ val = (info_avi.db1y_rgb_yuv422_yuv444 << 5) |
+ (info_avi.db1a_active_format_off_on << 4) |
+ (info_avi.db1b_no_vert_hori_verthori << 2) |
+ (info_avi.db1s_0_1_2);
+ hdmi_write_reg(name, offset, val);
+
+ offset = dbyte + (1 * dbyte_size);
+ val = (info_avi.db2c_no_itu601_itu709_extented << 6) |
+ (info_avi.db2m_no_43_169 << 4) |
+ (info_avi.db2r_same_43_169_149);
+ hdmi_write_reg(name, offset, val);
+
+ offset = dbyte + (2 * dbyte_size);
+ val = (info_avi.db3itc_no_yes << 7) |
+ (info_avi.db3ec_xvyuv601_xvyuv709 << 4) |
+ (info_avi.db3q_default_lr_fr << 2) |
+ (info_avi.db3sc_no_hori_vert_horivert);
+ hdmi_write_reg(name, offset, val);
+
+ offset = dbyte + (3 * dbyte_size);
+ hdmi_write_reg(name, offset, info_avi.db4vic_videocode);
+
+ offset = dbyte + (4 * dbyte_size);
+ val = info_avi.db5pr_no_2_3_4_5_6_7_8_9_10;
+ hdmi_write_reg(name, offset, val);
+
+ offset = dbyte + (5 * dbyte_size);
+ val = info_avi.db6_7_lineendoftop & 0x00FF;
+ hdmi_write_reg(name, offset, val);
+
+ offset = dbyte + (6 * dbyte_size);
+ val = ((info_avi.db6_7_lineendoftop >> 8) & 0x00FF);
+ hdmi_write_reg(name, offset, val);
+
+ offset = dbyte + (7 * dbyte_size);
+ val = info_avi.db8_9_linestartofbottom & 0x00FF;
+ hdmi_write_reg(name, offset, val);
+
+ offset = dbyte + (8 * dbyte_size);
+ val = ((info_avi.db8_9_linestartofbottom >> 8) & 0x00FF);
+ hdmi_write_reg(name, offset, val);
+
+ offset = dbyte + (9 * dbyte_size);
+ val = info_avi.db10_11_pixelendofleft & 0x00FF;
+ hdmi_write_reg(name, offset, val);
+
+ offset = dbyte + (10 * dbyte_size);
+ val = ((info_avi.db10_11_pixelendofleft >> 8) & 0x00FF);
+ hdmi_write_reg(name, offset, val);
+
+ offset = dbyte + (11 * dbyte_size);
+ val = info_avi.db12_13_pixelstartofright & 0x00FF;
+ hdmi_write_reg(name, offset , val);
+
+ offset = dbyte + (12 * dbyte_size);
+ val = ((info_avi.db12_13_pixelstartofright >> 8) & 0x00FF);
+ hdmi_write_reg(name, offset, val);
+
+ return 0;
+}
+
+static int hdmi_core_av_packet_config(u32 name,
+ struct hdmi_core_packet_enable_repeat r_p)
+{
+ /*enable/repeat the infoframe*/
+ hdmi_write_reg(name, HDMI_CORE_AV_PB_CTRL1,
+ (r_p.AudioPacketED << 5)|
+ (r_p.AudioPacketRepeat << 4)|
+ (r_p.AVIInfoFrameED << 1)|
+ (r_p.AVIInfoFrameRepeat));
+
+ /*enable/repeat the packet*/
+ hdmi_write_reg(name, HDMI_CORE_AV_PB_CTRL2,
+ (r_p.GeneralcontrolPacketED << 3)|
+ (r_p.GeneralcontrolPacketRepeat << 2)|
+ (r_p.GenericPacketED << 1)|
+ (r_p.GenericPacketRepeat));
+ return 0;
+}
+
+static void hdmi_w1_init(struct hdmi_video_timing *t_p,
+ struct hdmi_video_format *f_p,
+ struct hdmi_video_interface *i_p,
+ struct hdmi_irq_vector *pIrqVectorEnable,
+ struct hdmi_audio_format *audio_fmt,
+ struct hdmi_audio_dma *audio_dma)
+{
+ DBG("Enter HDMI_W1_GlobalInitVars()\n");
+
+ t_p->horizontalBackPorch = 0;
+ t_p->horizontalFrontPorch = 0;
+ t_p->horizontalSyncPulse = 0;
+ t_p->verticalBackPorch = 0;
+ t_p->verticalFrontPorch = 0;
+ t_p->verticalSyncPulse = 0;
+
+ f_p->packingMode = HDMI_PACK_10b_RGB_YUV444;
+ f_p->linePerPanel = 0;
+ f_p->pixelPerLine = 0;
+
+ i_p->vSyncPolarity = 0;
+ i_p->hSyncPolarity = 0;
+
+ i_p->interlacing = 0;
+ i_p->timingMode = 0; /* HDMI_TIMING_SLAVE */
+
+ pIrqVectorEnable->pllRecal = 0;
+ pIrqVectorEnable->pllUnlock = 0;
+ pIrqVectorEnable->pllLock = 0;
+ pIrqVectorEnable->phyDisconnect = 0;
+ pIrqVectorEnable->phyConnect = 0;
+ pIrqVectorEnable->phyShort5v = 0;
+ pIrqVectorEnable->videoEndFrame = 0;
+ pIrqVectorEnable->videoVsync = 0;
+ pIrqVectorEnable->fifoSampleRequest = 0;
+ pIrqVectorEnable->fifoOverflow = 0;
+ pIrqVectorEnable->fifoUnderflow = 0;
+ pIrqVectorEnable->ocpTimeOut = 0;
+ pIrqVectorEnable->core = 0;
+
+ audio_fmt->stereo_channel_enable = HDMI_STEREO_ONECHANNELS;
+ audio_fmt->audio_channel_location = HDMI_CEA_CODE_03;
+ audio_fmt->iec = HDMI_AUDIO_FORMAT_LPCM;
+ audio_fmt->justify = HDMI_AUDIO_JUSTIFY_LEFT;
+ audio_fmt->left_before = HDMI_SAMPLE_LEFT_FIRST;
+ audio_fmt->sample_number = HDMI_ONEWORD_ONE_SAMPLE;
+ audio_fmt->sample_size = HDMI_SAMPLE_24BITS;
+
+ audio_dma->dma_transfer = 0x10;
+ audio_dma->block_size = 0xC0;
+ audio_dma->dma_or_irq = HDMI_THRESHOLD_DMA;
+ audio_dma->threshold_value = 0x10;
+ audio_dma->block_start_end = HDMI_BLOCK_STARTEND_ON;
+
+}
+
+
+static void hdmi_w1_irq_enable(struct hdmi_irq_vector *pIrqVectorEnable)
+{
+ u32 r = 0;
+
+ r = ((pIrqVectorEnable->pllRecal << 31) |
+ (pIrqVectorEnable->pllUnlock << 30) |
+ (pIrqVectorEnable->pllLock << 29) |
+ (pIrqVectorEnable->phyDisconnect << 26) |
+ (pIrqVectorEnable->phyConnect << 25) |
+ (pIrqVectorEnable->phyShort5v << 24) |
+ (pIrqVectorEnable->videoEndFrame << 17) |
+ (pIrqVectorEnable->videoVsync << 16) |
+ (pIrqVectorEnable->fifoSampleRequest << 10) |
+ (pIrqVectorEnable->fifoOverflow << 9) |
+ (pIrqVectorEnable->fifoUnderflow << 8) |
+ (pIrqVectorEnable->ocpTimeOut << 4) |
+ (pIrqVectorEnable->core << 0));
+
+ hdmi_write_reg(HDMI_WP, HDMI_WP_IRQENABLE_SET, r);
+}
+
+static inline int hdmi_w1_wait_for_bit_change(const u32 ins,
+ u32 idx, int b2, int b1, int val)
+{
+ int t = 0;
+ while (val != FLD_GET(hdmi_read_reg(ins, idx), b2, b1)) {
+ udelay(1);
+ if (t++ > 1000)
+ return !val;
+ }
+ return val;
+}
+
+/* todo: add timeout value */
+int hdmi_w1_set_wait_srest(void)
+{
+ /* reset W1 */
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_SYSCONFIG, 0x1, 0, 0);
+
+ /* wait till SOFTRESET == 0 */
+ while (FLD_GET(hdmi_read_reg(HDMI_WP, HDMI_WP_SYSCONFIG), 0, 0))
+ ;
+
+ return 0;
+}
+
+/* PHY_PWR_CMD */
+int hdmi_w1_set_wait_phy_pwr(HDMI_PhyPwr_t val)
+{
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_PWR_CTRL, val, 7, 6);
+
+ if (hdmi_w1_wait_for_bit_change(HDMI_WP,
+ HDMI_WP_PWR_CTRL, 5, 4, val) != val) {
+ ERR("Failed to set PHY power mode to %d\n", val);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/* PLL_PWR_CMD */
+int hdmi_w1_set_wait_pll_pwr(HDMI_PllPwr_t val)
+{
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_PWR_CTRL, val, 3, 2);
+
+ /* wait till PLL_PWR_STATUS reaches the requested state */
+ if (hdmi_w1_wait_for_bit_change(HDMI_WP,
+ HDMI_WP_PWR_CTRL, 1, 0, val) != val) {
+ ERR("Failed to set PLL power mode to %d\n", val);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void hdmi_w1_video_stop(void)
+{
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_VIDEO_CFG, 0, 31, 31);
+}
+
+void hdmi_w1_video_start(void)
+{
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_VIDEO_CFG, (u32)0x1, 31, 31);
+}
+
+static void hdmi_w1_video_init_format(struct hdmi_video_format *f_p,
+ struct hdmi_video_timing *t_p, struct hdmi_config *param)
+{
+ DBG("Enter HDMI_W1_ConfigVideoResolutionTiming()\n");
+
+ f_p->linePerPanel = param->lpp;
+ f_p->pixelPerLine = param->ppl;
+
+ t_p->horizontalBackPorch = param->hbp;
+ t_p->horizontalFrontPorch = param->hfp;
+ t_p->horizontalSyncPulse = param->hsw;
+ t_p->verticalBackPorch = param->vbp;
+ t_p->verticalFrontPorch = param->vfp;
+ t_p->verticalSyncPulse = param->vsw;
+}
+
+static void hdmi_w1_video_config_format(
+ struct hdmi_video_format *f_p)
+{
+ u32 l = 0;
+
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_VIDEO_CFG, f_p->packingMode, 10, 8);
+
+ l |= FLD_VAL(f_p->linePerPanel, 31, 16);
+ l |= FLD_VAL(f_p->pixelPerLine, 15, 0);
+ hdmi_write_reg(HDMI_WP, HDMI_WP_VIDEO_SIZE, l);
+}
+
+static void hdmi_w1_video_config_interface(
+ struct hdmi_video_interface *i_p)
+{
+ u32 r;
+ DBG("Enter HDMI_W1_ConfigVideoInterface()\n");
+
+ r = hdmi_read_reg(HDMI_WP, HDMI_WP_VIDEO_CFG);
+ r = FLD_MOD(r, i_p->vSyncPolarity, 7, 7);
+ r = FLD_MOD(r, i_p->hSyncPolarity, 6, 6);
+ r = FLD_MOD(r, i_p->interlacing, 3, 3);
+ r = FLD_MOD(r, i_p->timingMode, 1, 0);
+ hdmi_write_reg(HDMI_WP, HDMI_WP_VIDEO_CFG, r);
+}
+
+static void hdmi_w1_video_config_timing(
+ struct hdmi_video_timing *t_p)
+{
+ u32 timing_h = 0;
+ u32 timing_v = 0;
+
+ DBG("Enter HDMI_W1_ConfigVideoTiming ()\n");
+
+ timing_h |= FLD_VAL(t_p->horizontalBackPorch, 31, 20);
+ timing_h |= FLD_VAL(t_p->horizontalFrontPorch, 19, 8);
+ timing_h |= FLD_VAL(t_p->horizontalSyncPulse, 7, 0);
+ hdmi_write_reg(HDMI_WP, HDMI_WP_VIDEO_TIMING_H, timing_h);
+
+ timing_v |= FLD_VAL(t_p->verticalBackPorch, 31, 20);
+ timing_v |= FLD_VAL(t_p->verticalFrontPorch, 19, 8);
+ timing_v |= FLD_VAL(t_p->verticalSyncPulse, 7, 0);
+ hdmi_write_reg(HDMI_WP, HDMI_WP_VIDEO_TIMING_V, timing_v);
+}
+
+static int hdmi_w1_audio_config_format(u32 name,
+ struct hdmi_audio_format *audio_fmt)
+{
+ int ret = 0;
+ u32 value = 0;
+
+ value = hdmi_read_reg(name, HDMI_WP_AUDIO_CFG);
+ value &= 0xfffffff7;
+ value |= ((audio_fmt->justify) << 3);
+ value &= 0xfffffffb;
+ value |= ((audio_fmt->left_before) << 2);
+ value &= 0xfffffffd;
+ value |= ((audio_fmt->sample_number) << 1);
+ value &= 0xfffffffe;
+ value |= ((audio_fmt->sample_size));
+ value &= 0xf8ffffff;
+ value |= ((audio_fmt->stereo_channel_enable) << 24);
+ value &= 0xff00ffff;
+ value |= ((audio_fmt->audio_channel_location) << 16);
+ value &= 0xffffffef;
+ value |= ((audio_fmt->iec) << 4);
+ /* Wakeup */
+ value = 0x1030022;
+ hdmi_write_reg(name, HDMI_WP_AUDIO_CFG, value);
+ DBG("HDMI_WP_AUDIO_CFG = 0x%x \n",value);
+
+ return ret;
+}
+
+static int hdmi_w1_audio_config_dma(u32 name, struct hdmi_audio_dma *audio_dma)
+{
+ int ret = 0;
+ u32 value = 0;
+
+ value = hdmi_read_reg(name, HDMI_WP_AUDIO_CFG2);
+ value &= 0xffffff00;
+ value |= (audio_dma->block_size);
+ value &= 0xffff00ff;
+ value |= ((audio_dma->dma_transfer) << 8);
+ /* Wakeup */
+ value = 0x20C0;
+ hdmi_write_reg(name, HDMI_WP_AUDIO_CFG2, value);
+ DBG("HDMI_WP_AUDIO_CFG2 = 0x%x \n", value);
+
+ value = hdmi_read_reg(name, HDMI_WP_AUDIO_CTRL);
+ value &= 0xfffffdff;
+ value |= ((audio_dma->dma_or_irq)<<9);
+ value &= 0xfffffe00;
+ value |= (audio_dma->threshold_value);
+ /* Wakeup */
+ value = 0x020;
+ hdmi_write_reg(name, HDMI_WP_AUDIO_CTRL, value);
+ DBG("HDMI_WP_AUDIO_CTRL = 0x%x \n", value);
+
+ return ret;
+}
+
+static void hdmi_w1_audio_enable(void)
+{
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_AUDIO_CTRL, 1, 31, 31);
+}
+
+static void hdmi_w1_audio_disable(void)
+{
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_AUDIO_CTRL, 0, 31, 31);
+}
+
+static void hdmi_w1_audio_start(void)
+{
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_AUDIO_CTRL, 1, 30, 30);
+}
+
+static void hdmi_w1_audio_stop(void)
+{
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_AUDIO_CTRL, 0, 30, 30);
+}
+
+static int hdmi_w1_audio_config(void)
+{
+ int ret;
+
+ struct hdmi_audio_format audio_fmt;
+ struct hdmi_audio_dma audio_dma;
+
+ audio_fmt.justify = HDMI_AUDIO_JUSTIFY_LEFT;
+ audio_fmt.sample_number = HDMI_ONEWORD_ONE_SAMPLE;
+ audio_fmt.sample_size = HDMI_SAMPLE_24BITS;
+ audio_fmt.stereo_channel_enable = HDMI_STEREO_ONECHANNELS;
+ audio_fmt.audio_channel_location = 0x03;
+
+ ret = hdmi_w1_audio_config_format(HDMI_WP, &audio_fmt);
+
+ audio_dma.dma_transfer = 0x20;
+ audio_dma.threshold_value = 0x60;
+ audio_dma.dma_or_irq = HDMI_THRESHOLD_DMA;
+
+ ret = hdmi_w1_audio_config_dma(HDMI_WP, &audio_dma);
+
+ return ret;
+}
+
+int hdmi_lib_enable(struct hdmi_config *cfg)
+{
+ u32 r, val;
+
+ u32 av_name = HDMI_CORE_AV;
+
+ /*HDMI*/
+ struct hdmi_video_timing VideoTimingParam;
+ struct hdmi_video_format VideoFormatParam;
+ struct hdmi_video_interface VideoInterfaceParam;
+ struct hdmi_irq_vector IrqHdmiVectorEnable;
+ struct hdmi_audio_format audio_fmt;
+ struct hdmi_audio_dma audio_dma;
+
+ /*HDMI core*/
+ struct hdmi_core_infoframe_avi avi_param;
+ struct hdmi_core_video_config_t v_core_cfg;
+ struct hdmi_core_audio_config audio_cfg;
+ struct hdmi_core_packet_enable_repeat repeat_param;
+
+ hdmi_w1_init(&VideoTimingParam, &VideoFormatParam,
+ &VideoInterfaceParam, &IrqHdmiVectorEnable,
+ &audio_fmt, &audio_dma);
+
+ hdmi_core_init(&v_core_cfg,
+ &audio_cfg,
+ &avi_param,
+ &repeat_param);
+
+ /* Enable PLL Lock and UnLock interrupts */
+ IrqHdmiVectorEnable.pllUnlock = 1;
+ IrqHdmiVectorEnable.pllLock = 1;
+
+ /***************** init DSS register **********************/
+ hdmi_w1_irq_enable(&IrqHdmiVectorEnable);
+
+ hdmi_w1_video_init_format(&VideoFormatParam,
+ &VideoTimingParam, cfg);
+
+ hdmi_w1_video_config_timing(&VideoTimingParam);
+
+ /*video config*/
+ VideoFormatParam.packingMode = HDMI_PACK_24b_RGB_YUV444_YUV422;
+
+ hdmi_w1_video_config_format(&VideoFormatParam);
+
+ /* FIXME */
+ VideoInterfaceParam.vSyncPolarity = cfg->v_pol;
+ VideoInterfaceParam.hSyncPolarity = cfg->h_pol;
+ VideoInterfaceParam.interlacing = cfg->interlace;
+ VideoInterfaceParam.timingMode = 1; /* HDMI_TIMING_MASTER_24BIT */
+
+ hdmi_w1_video_config_interface(&VideoInterfaceParam);
+
+ /* hnagalla */
+ val = hdmi_read_reg(HDMI_WP, HDMI_WP_VIDEO_SIZE);
+
+ val &= 0x0FFFFFFF;
+ val |= ((0x1f) << 27); /* wakeup */
+ hdmi_write_reg(HDMI_WP, HDMI_WP_VIDEO_SIZE, val);
+
+ hdmi_w1_audio_config();
+
+ /****************************** CORE *******************************/
+ /************* configure core video part ********************************/
+ /*set software reset in the core*/
+ hdmi_core_swreset_assert();
+
+ /*power down off*/
+ hdmi_core_powerdown_disable();
+
+ v_core_cfg.CorePacketMode = HDMI_PACKETMODE24BITPERPIXEL;
+ v_core_cfg.CoreHdmiDvi = HDMI_HDMI;
+
+ /* hnagalla */
+ audio_cfg.fs = 0x02;
+ audio_cfg.if_fs = 0x00;
+ audio_cfg.n = 6144;
+ audio_cfg.cts = 74250;
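+ /*
+ * Note: N = 6144 with CTS = 74250 satisfies the HDMI ACR relation
+ * CTS = f_TMDS * N / (128 * fs) for 48 kHz audio at a 74.25 MHz
+ * TMDS clock, which is what the hard-coded values above imply.
+ */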
+
+ /* audio channel */
+ audio_cfg.if_sample_size = 0x0;
+ audio_cfg.layout = 0;
+ audio_cfg.if_channel_number = 2;
+ audio_cfg.if_audio_channel_location = 0x00;
+
+ /* TODO: Is this configuration correct? */
+ audio_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
+ audio_cfg.cts_mode = 0;
+
+ r = hdmi_core_video_config(&v_core_cfg);
+
+ /* hnagalla */
+ hdmi_core_audio_config(av_name, &audio_cfg);
+ hdmi_core_audio_mode_enable(av_name);
+
+ /*release software reset in the core*/
+ hdmi_core_swreset_release();
+
+ /*configure packet*/
+ /* AVI infoframe; see CEA-861-D, page 65 */
+ avi_param.db1y_rgb_yuv422_yuv444 = INFOFRAME_AVI_DB1Y_RGB;
+ avi_param.db1a_active_format_off_on =
+ INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
+ avi_param.db1b_no_vert_hori_verthori = INFOFRAME_AVI_DB1B_NO;
+ avi_param.db1s_0_1_2 = INFOFRAME_AVI_DB1S_0;
+ avi_param.db2c_no_itu601_itu709_extented = INFOFRAME_AVI_DB2C_NO;
+ avi_param.db2m_no_43_169 = INFOFRAME_AVI_DB2M_NO;
+ avi_param.db2r_same_43_169_149 = INFOFRAME_AVI_DB2R_SAME;
+ avi_param.db3itc_no_yes = INFOFRAME_AVI_DB3ITC_NO;
+ avi_param.db3ec_xvyuv601_xvyuv709 = INFOFRAME_AVI_DB3EC_XVYUV601;
+ avi_param.db3q_default_lr_fr = INFOFRAME_AVI_DB3Q_DEFAULT;
+ avi_param.db3sc_no_hori_vert_horivert = INFOFRAME_AVI_DB3SC_NO;
+ avi_param.db4vic_videocode = cfg->video_format;
+ avi_param.db5pr_no_2_3_4_5_6_7_8_9_10 = INFOFRAME_AVI_DB5PR_NO;
+ avi_param.db6_7_lineendoftop = 0;
+ avi_param.db8_9_linestartofbottom = 0;
+ avi_param.db10_11_pixelendofleft = 0;
+ avi_param.db12_13_pixelstartofright = 0;
+
+ r = hdmi_core_audio_infoframe_avi(av_name, avi_param);
+
+ /*enable/repeat the infoframe*/
+ repeat_param.AVIInfoFrameED = PACKETENABLE;
+ repeat_param.AVIInfoFrameRepeat = PACKETREPEATON;
+ /* wakeup */
+ repeat_param.AudioPacketED = PACKETENABLE;
+ repeat_param.AudioPacketRepeat = PACKETREPEATON;
+ r = hdmi_core_av_packet_config(av_name, repeat_param);
+
+ REG_FLD_MOD(av_name, HDMI_CORE_AV__HDMI_CTRL, cfg->hdmi_dvi, 0, 0);
+ return r;
+}
+
+int hdmi_lib_init(void)
+{
+ u32 rev;
+
+ hdmi.base_wp = ioremap(HDMI_WP, (HDMI_HDCP - HDMI_WP));
+
+ if (!hdmi.base_wp) {
+ ERR("can't ioremap WP\n");
+ return -ENOMEM;
+ }
+
+ hdmi.base_core = hdmi.base_wp + 0x400;
+ hdmi.base_core_av = hdmi.base_wp + 0x900;
+
+ rev = hdmi_read_reg(HDMI_WP, HDMI_WP_REVISION);
+
+ printk(KERN_INFO "OMAP HDMI W1 rev %d.%d\n",
+ FLD_GET(rev, 10, 8), FLD_GET(rev, 5, 0));
+
+ return 0;
+}
+
+void hdmi_lib_exit(void)
+{
+ iounmap(hdmi.base_wp);
+}
+
+void dump_regs(void)
+{
+ DBG("W1 VIDEO_CFG = 0x%x\r\n", hdmi_read_reg(HDMI_WP, 0x50ul));
+ DBG("Core CTRL1 = 0x%x\r\n", hdmi_read_reg(HDMI_WP, 0x420ul));
+ DBG("Core VID_MODE = 0x%x\r\n", hdmi_read_reg(HDMI_WP, 0x528ul));
+ DBG("Core AV_CTRL = 0x%x\r\n", hdmi_read_reg(HDMI_WP, 0x9bcul));
+ DBG("Core VID_ACEN = 0x%x\r\n", hdmi_read_reg(HDMI_WP, 0x524ul));
+ DBG("Core PB_CTR2 packet buf = 0x%x\r\n", hdmi_read_reg(HDMI_WP, 0x9fcul));
+}
+
+
+
+/* wrapper functions to be used until L24.5 release*/
+int HDMI_CORE_DDC_READEDID(u32 name, u8 *p)
+{
+ int r = hdmi_core_ddc_edid(p);
+ return r;
+}
+
+int HDMI_W1_StopVideoFrame(u32 name)
+{
+ DBG("Enter HDMI_W1_StopVideoFrame()\n");
+ hdmi_w1_video_stop();
+ return 0;
+}
+
+int HDMI_W1_StartVideoFrame(u32 name)
+{
+ DBG("Enter HDMI_W1_StartVideoFrame ()\n");
+ hdmi_w1_video_start();
+ return 0;
+}
+
+/* PHY_PWR_CMD */
+int HDMI_W1_SetWaitPhyPwrState(u32 name,
+ HDMI_PhyPwr_t param)
+{
+ int r = hdmi_w1_set_wait_phy_pwr(param);
+ return r;
+}
+
+/* PLL_PWR_CMD */
+int HDMI_W1_SetWaitPllPwrState(u32 name,
+ HDMI_PllPwr_t param)
+{
+ int r = hdmi_w1_set_wait_pll_pwr(param);
+ return r;
+}
+
+int HDMI_W1_SetWaitSoftReset(void)
+{
+ /* reset W1 */
+ REG_FLD_MOD(HDMI_WP, HDMI_WP_SYSCONFIG, 0x1, 0, 0);
+
+ /* wait till SOFTRESET == 0 */
+ while (FLD_GET(hdmi_read_reg(HDMI_WP, HDMI_WP_SYSCONFIG), 0, 0))
+ ;
+
+ return 0;
+}
+
+int hdmi_w1_wrapper_enable(u32 instanceName)
+{
+ printk(KERN_INFO "Wrapper Enabled...\n");
+ hdmi_w1_audio_enable();
+ return 0;
+}
+
+int hdmi_w1_wrapper_disable(u32 instanceName)
+{
+ hdmi_w1_audio_disable();
+ printk(KERN_INFO "Wrapper disabled...\n");
+ return 0;
+}
+
+int hdmi_w1_stop_audio_transfer(u32 instanceName)
+{
+ hdmi_w1_audio_stop();
+ return 0;
+}
+
+int hdmi_w1_start_audio_transfer(u32 instanceName)
+{
+ hdmi_w1_audio_start();
+ printk(KERN_INFO "Start audio transfer...\n");
+ return 0;
+}
+
+int DSS_HDMI_CONFIG(HDMI_Timing_t timings, u32 video_format,
+ u32 mode)
+{
+ int err;
+ struct hdmi_config data;
+
+ data.ppl = timings.pixelPerLine;
+ data.lpp = timings.linePerPanel;
+ data.pixel_clock = timings.pplclk;
+
+ data.hsw = timings.horizontalSyncPulse;
+ data.hfp = timings.horizontalFrontPorch;
+ data.hbp = timings.horizontalBackPorch;
+ data.vsw = timings.verticalSyncPulse;
+ data.vfp = timings.verticalFrontPorch;
+ data.vbp = timings.verticalBackPorch;
+
+ data.h_pol = 1;
+ data.v_pol = 1;
+ data.hdmi_dvi = mode;
+ data.video_format = video_format;
+
+ err = hdmi_lib_enable(&data);
+ return err;
+}
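+
+/*
+ * Illustrative usage sketch: a display driver might bring the HDMI path
+ * up roughly as follows.  The timing values are the usual CEA-861 numbers
+ * for 720p60 and are given only as an example; pplclk is assumed to be
+ * the pixel clock in kHz.
+ *
+ *	HDMI_Timing_t t = {
+ *		.pixelPerLine = 1280, .linePerPanel = 720,
+ *		.horizontalBackPorch = 220, .horizontalFrontPorch = 110,
+ *		.horizontalSyncPulse = 40,
+ *		.verticalBackPorch = 20, .verticalFrontPorch = 5,
+ *		.verticalSyncPulse = 5,
+ *		.pplclk = 74250,
+ *	};
+ *
+ *	hdmi_lib_init();
+ *	DSS_HDMI_CONFIG(t, HDMI_CEA_CODE_04, HDMI_HDMI);
+ *	HDMI_W1_StartVideoFrame(HDMI_WP);
+ */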
diff --git a/arch/arm/plat-omap/include/plat/display.h b/arch/arm/plat-omap/include/plat/display.h
index c66e464732df..9e947ae43771 100644..100755
--- a/arch/arm/plat-omap/include/plat/display.h
+++ b/arch/arm/plat-omap/include/plat/display.h
@@ -25,11 +25,16 @@
#include <linux/device.h>
#include <asm/atomic.h>
+#include <plat/omap_device.h>
+
#define DISPC_IRQ_FRAMEDONE (1 << 0)
+ /* OMAP4: FRAMEDONE1: for prim LCD*/
#define DISPC_IRQ_VSYNC (1 << 1)
+ /* OMAP4: VSYNC1: for prim LCD*/
#define DISPC_IRQ_EVSYNC_EVEN (1 << 2)
#define DISPC_IRQ_EVSYNC_ODD (1 << 3)
#define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4)
+ /* OMAP4: ACBIAS1: for prim LCD*/
#define DISPC_IRQ_PROG_LINE_NUM (1 << 5)
#define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6)
#define DISPC_IRQ_GFX_END_WIN (1 << 7)
@@ -40,9 +45,23 @@
#define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12)
#define DISPC_IRQ_VID2_END_WIN (1 << 13)
#define DISPC_IRQ_SYNC_LOST (1 << 14)
+ /* OMAP4: SYNCLOST1: for prim LCD*/
#define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15)
#define DISPC_IRQ_WAKEUP (1 << 16)
+#ifdef CONFIG_ARCH_OMAP4
+#define DISPC_IRQ_SYNC_LOST_2 (1 << 17)
+#define DISPC_IRQ_VSYNC2 (1 << 18)
+#define DISPC_IRQ_VID3_END_WIN (1 << 19)
+#define DISPC_IRQ_VID3_FIFO_UNDERFLOW (1 << 20)
+ /* VID3_BUF_UNDERFLOW*/
+#define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21)
+#define DISPC_IRQ_FRAMEDONE2 (1 << 22)
+#define DISPC_IRQ_FRAMEDONE_WB (1 << 23)
+#define DISPC_IRQ_FRAMEDONE_DIG (1 << 24) /* FRAMEDONE_TV*/
+#define DISPC_IRQ_WB_BUF_OVERFLOW (1 << 25)
+#endif
+
struct omap_dss_device;
struct omap_overlay_manager;
@@ -53,17 +72,24 @@ enum omap_display_type {
OMAP_DISPLAY_TYPE_SDI = 1 << 2,
OMAP_DISPLAY_TYPE_DSI = 1 << 3,
OMAP_DISPLAY_TYPE_VENC = 1 << 4,
+ OMAP_DISPLAY_TYPE_HDMI = 1 << 5,
};
enum omap_plane {
OMAP_DSS_GFX = 0,
OMAP_DSS_VIDEO1 = 1,
OMAP_DSS_VIDEO2 = 2
+#ifdef CONFIG_ARCH_OMAP4
+ , OMAP_DSS_VIDEO3 = 3
+#endif
};
enum omap_channel {
OMAP_DSS_CHANNEL_LCD = 0,
OMAP_DSS_CHANNEL_DIGIT = 1,
+#ifdef CONFIG_ARCH_OMAP4
+ OMAP_DSS_CHANNEL_LCD2 = 2,
+#endif
};
enum omap_color_mode {
@@ -71,7 +97,7 @@ enum omap_color_mode {
OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */
OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */
OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */
- OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */
+ OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12x, 16-bit container */
OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */
OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */
OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */
@@ -93,6 +119,15 @@ enum omap_color_mode {
OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
OMAP_DSS_COLOR_UYVY,
+#ifdef CONFIG_ARCH_OMAP4
+ OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */
+ OMAP_DSS_COLOR_RGBA12 = 1 << 15, /* RGBA12 - 4444 */
+ OMAP_DSS_COLOR_XRGB12 = 1 << 16, /* xRGB12, 16-bit container */
+ OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16-1555 */
+ OMAP_DSS_COLOR_RGBX24_32_ALGN = 1 << 18, /* 32-msb aligned 24bit */
+ OMAP_DSS_COLOR_XRGB15 = 1 << 19, /* xRGB15: 1555*/
+#endif
+
OMAP_DSS_COLOR_GFX_OMAP3 =
OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
@@ -102,16 +137,30 @@ enum omap_color_mode {
OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
OMAP_DSS_COLOR_VID1_OMAP3 =
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
- OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
- OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY,
+#ifdef CONFIG_ARCH_OMAP4
+ OMAP_DSS_COLOR_NV12 | OMAP_DSS_COLOR_RGBA12 |
+ OMAP_DSS_COLOR_XRGB12 | OMAP_DSS_COLOR_ARGB16_1555 |
+ OMAP_DSS_COLOR_RGBX24_32_ALGN | OMAP_DSS_COLOR_XRGB15 |
+#endif
+ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
+ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
+ OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 |
+ OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
OMAP_DSS_COLOR_VID2_OMAP3 =
+#ifdef CONFIG_ARCH_OMAP4
+ OMAP_DSS_COLOR_NV12 | OMAP_DSS_COLOR_RGBA12 |
+ OMAP_DSS_COLOR_XRGB12 | OMAP_DSS_COLOR_ARGB16_1555 |
+ OMAP_DSS_COLOR_RGBX24_32_ALGN | OMAP_DSS_COLOR_XRGB15 |
+#endif
OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 |
OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+
+ OMAP_DSS_COLOR_VID3_OMAP3 = OMAP_DSS_COLOR_VID2_OMAP3,
};
enum omap_lcd_display_type {
@@ -173,11 +222,17 @@ enum omap_dss_display_state {
enum omap_dss_overlay_managers {
OMAP_DSS_OVL_MGR_LCD,
OMAP_DSS_OVL_MGR_TV,
+#ifdef CONFIG_ARCH_OMAP4
+ OMAP_DSS_OVL_MGR_LCD2,
+#endif
};
enum omap_dss_rotation_type {
OMAP_DSS_ROT_DMA = 0,
OMAP_DSS_ROT_VRFB = 1,
+#ifdef CONFIG_ARCH_OMAP4
+ OMAP_DSS_ROT_TILER = 2,
+#endif
};
/* clockwise rotation angle */
@@ -197,6 +252,15 @@ enum omap_overlay_manager_caps {
OMAP_DSS_OVL_MGR_CAP_DISPC = 1 << 0,
};
+#ifdef CONFIG_ARCH_OMAP4
+enum omap_overlay_zorder {
+ OMAP_DSS_OVL_ZORDER_0 = 0x0,
+ OMAP_DSS_OVL_ZORDER_1 = 0x1,
+ OMAP_DSS_OVL_ZORDER_2 = 0x2,
+ OMAP_DSS_OVL_ZORDER_3 = 0x3,
+};
+#endif
+
/* RFBI */
struct rfbi_timings {
@@ -230,14 +294,35 @@ int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
int hs_pol_inv, int vs_pol_inv, int extif_div);
/* DSI */
-void dsi_bus_lock(void);
-void dsi_bus_unlock(void);
-int dsi_vc_dcs_write(int channel, u8 *data, int len);
-int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len);
-int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen);
-int dsi_vc_set_max_rx_packet_size(int channel, u16 len);
-int dsi_vc_send_null(int channel);
-int dsi_vc_send_bta_sync(int channel);
+enum dsi {
+ dsi1 = 0,
+ dsi2 = 1,
+ };
+
+#define PWM2ON 0x03
+#define PWM2OFF 0x04
+#define TOGGLE3 0x92
+
+#define DSI1_GPIO_27 27
+#define DSI2_GPIO_59 59
+#define DSI2_GPIO_104 104
+#define DSI1_GPIO_102 102
+#define HDMI_GPIO_60 60
+#define HDMI_GPIO_41 41
+#define DLP_4430_GPIO_40 40
+#define DLP_4430_GPIO_44 44
+#define DLP_4430_GPIO_45 45
+#define DLP_4430_GPIO_59 59
+
+void dsi_bus_lock(enum dsi lcd_ix);
+void dsi_bus_unlock(enum dsi lcd_ix);
+int dsi_vc_dcs_write(enum dsi lcd_ix, int channel, u8 *data, int len);
+int dsi_vc_dcs_write_nosync(enum dsi lcd_ix, int channel, u8 *data, int len);
+int dsi_vc_dcs_read(enum dsi lcd_ix, int channel,
+ u8 dcs_cmd, u8 *buf, int buflen);
+int dsi_vc_set_max_rx_packet_size(enum dsi lcd_ix, int channel, u16 len);
+int dsi_vc_send_bta_sync(enum dsi lcd_ix, int channel);
+int dsi_vc_send_null(enum dsi lcd_ix, int channel);
/* Board specific data */
struct omap_dss_board_info {
@@ -245,6 +330,7 @@ struct omap_dss_board_info {
int num_devices;
struct omap_dss_device **devices;
struct omap_dss_device *default_device;
+ void (*set_mpu_wkup_lat)(struct device *dev, int set);
};
struct omap_video_timings {
@@ -295,6 +381,10 @@ struct omap_overlay_info {
u16 out_width; /* if 0, out_width == width */
u16 out_height; /* if 0, out_height == height */
u8 global_alpha;
+#ifdef CONFIG_ARCH_OMAP4
+ u32 p_uv_addr; /* relevant for NV12 format only */
+ enum omap_overlay_zorder zorder;
+#endif
};
struct omap_overlay {
@@ -460,6 +550,8 @@ struct omap_dss_device {
enum omap_dss_display_state state;
+ enum omap_channel channel;
+
int (*enable)(struct omap_dss_device *dssdev);
void (*disable)(struct omap_dss_device *dssdev);
@@ -539,6 +631,12 @@ struct omap_dss_driver {
u16 x, u16 y, u16 w, u16 h);
};
+struct pico_platform_data {
+ u8 gpio_intr;
+};
+
+extern struct omap_device *od;
+
int omap_dss_register_driver(struct omap_dss_driver *);
void omap_dss_unregister_driver(struct omap_dss_driver *);
@@ -564,6 +662,8 @@ struct omap_overlay *omap_dss_get_overlay(int num);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+bool dispc_go_busy(enum omap_channel channel);
+void dispc_go(enum omap_channel channel);
int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout);
int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
diff --git a/arch/arm/plat-omap/include/plat/hdmi_lib.h b/arch/arm/plat-omap/include/plat/hdmi_lib.h
new file mode 100755
index 000000000000..dea29551522d
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/hdmi_lib.h
@@ -0,0 +1,428 @@
+ /*
+ * hdmi_lib.h
+ *
+ * HDMI driver definition for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HDMI_H_
+#define _HDMI_H_
+
+#include <linux/string.h>
+
+#define HDMI_WP 0x58006000
+#define HDMI_CORE_SYS 0x58006400
+#define HDMI_CORE_AV 0x58006900
+#define HDMI_HDCP 0x58007000
+
+#define HDMI_WP_AUDIO_DATA 0x8Cul
+
+#define DBG(format, ...) \
+ printk(KERN_DEBUG "hdmi: " format, ## __VA_ARGS__)
+#define ERR(format, ...) \
+ printk(KERN_ERR "hdmi error: " format, ## __VA_ARGS__)
+
+#define BITS_32(in_NbBits) \
+ ((((u32)1 << in_NbBits) - 1) | ((u32)1 << in_NbBits))
+
+#define BITFIELD(in_UpBit, in_LowBit) \
+ (BITS_32(in_UpBit) & ~((BITS_32(in_LowBit)) >> 1))
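+
+/*
+ * For illustration: BITS_32(n) sets bits n..0, so BITS_32(7) == 0xff;
+ * BITFIELD(up, low) keeps bits up..low, so BITFIELD(7, 4) == 0xf0.
+ */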
+
+struct hdmi_irq_vector {
+ u8 pllRecal;
+ u8 pllUnlock;
+ u8 pllLock;
+ u8 phyDisconnect;
+ u8 phyConnect;
+ u8 phyShort5v;
+ u8 videoEndFrame;
+ u8 videoVsync;
+ u8 fifoSampleRequest;
+ u8 fifoOverflow;
+ u8 fifoUnderflow;
+ u8 ocpTimeOut;
+ u8 core;
+};
+
+typedef enum HDMI_PhyPwr_label {
+ HDMI_PHYPWRCMD_OFF = 0,
+ HDMI_PHYPWRCMD_LDOON = 1,
+ HDMI_PHYPWRCMD_TXON = 2
+} HDMI_PhyPwr_t, *pHDMI_PhyPwr_t;
+
+typedef enum HDMI_PllPwr_label {
+ HDMI_PLLPWRCMD_ALLOFF = 0,
+ HDMI_PLLPWRCMD_PLLONLY = 1,
+ HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
+ HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
+} HDMI_PllPwr_t, *pHDMI_PllPwr_t;
+
+enum hdmi_core_inputbus_width {
+ HDMI_INPUT_8BIT = 0,
+ HDMI_INPUT_10BIT = 1,
+ HDMI_INPUT_12BIT = 2
+};
+
+enum hdmi_core_dither_trunc {
+ HDMI_OUTPUTTRUNCATION_8BIT = 0,
+ HDMI_OUTPUTTRUNCATION_10BIT = 1,
+ HDMI_OUTPUTTRUNCATION_12BIT = 2,
+ HDMI_OUTPUTDITHER_8BIT = 3,
+ HDMI_OUTPUTDITHER_10BIT = 4,
+ HDMI_OUTPUTDITHER_12BIT = 5
+};
+
+enum hdmi_core_deepcolor_ed {
+ HDMI_DEEPCOLORPACKECTDISABLE = 0,
+ HDMI_DEEPCOLORPACKECTENABLE = 1
+};
+
+enum hdmi_core_packet_mode {
+ HDMI_PACKETMODERESERVEDVALUE = 0,
+ HDMI_PACKETMODE24BITPERPIXEL = 4,
+ HDMI_PACKETMODE30BITPERPIXEL = 5,
+ HDMI_PACKETMODE36BITPERPIXEL = 6,
+ HDMI_PACKETMODE48BITPERPIXEL = 7
+};
+
+enum hdmi_core_hdmi_dvi {
+ HDMI_DVI = 0,
+ HDMI_HDMI = 1
+};
+
+enum hdmi_core_tclkselclkmult {
+ FPLL05IDCK = 0,
+ FPLL10IDCK = 1,
+ FPLL20IDCK = 2,
+ FPLL40IDCK = 3
+};
+
+struct hdmi_core_video_config_t {
+ enum hdmi_core_inputbus_width CoreInputBusWide;
+ enum hdmi_core_dither_trunc CoreOutputDitherTruncation;
+ enum hdmi_core_deepcolor_ed CoreDeepColorPacketED;
+ enum hdmi_core_packet_mode CorePacketMode;
+ enum hdmi_core_hdmi_dvi CoreHdmiDvi;
+ enum hdmi_core_tclkselclkmult CoreTclkSelClkMult;
+};
+
+enum hdmi_core_fs {
+ FS_32000 = 0,
+ FS_44100 = 1
+};
+
+enum hdmi_core_layout {
+ LAYOUT_2CH = 0,
+ LAYOUT_8CH = 1
+};
+
+enum hdmi_core_cts_mode {
+ CTS_MODE_HW = 0,
+ CTS_MODE_SW = 1
+};
+
+enum hdmi_core_packet_ctrl {
+ PACKETENABLE = 1,
+ PACKETDISABLE = 0,
+ PACKETREPEATON = 1,
+ PACKETREPEATOFF = 0
+};
+
+/* INFOFRAME_AVI_ definitions */
+enum hdmi_core_infoframe {
+ INFOFRAME_AVI_DB1Y_RGB = 0,
+ INFOFRAME_AVI_DB1Y_YUV422 = 1,
+ INFOFRAME_AVI_DB1Y_YUV444 = 2,
+ INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
+ INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
+ INFOFRAME_AVI_DB1B_NO = 0,
+ INFOFRAME_AVI_DB1B_VERT = 1,
+ INFOFRAME_AVI_DB1B_HORI = 2,
+ INFOFRAME_AVI_DB1B_VERTHORI = 3,
+ INFOFRAME_AVI_DB1S_0 = 0,
+ INFOFRAME_AVI_DB1S_1 = 1,
+ INFOFRAME_AVI_DB1S_2 = 2,
+ INFOFRAME_AVI_DB2C_NO = 0,
+ INFOFRAME_AVI_DB2C_ITU601 = 1,
+ INFOFRAME_AVI_DB2C_ITU709 = 2,
+ INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
+ INFOFRAME_AVI_DB2M_NO = 0,
+ INFOFRAME_AVI_DB2M_43 = 1,
+ INFOFRAME_AVI_DB2M_169 = 2,
+ INFOFRAME_AVI_DB2R_SAME = 8,
+ INFOFRAME_AVI_DB2R_43 = 9,
+ INFOFRAME_AVI_DB2R_169 = 10,
+ INFOFRAME_AVI_DB2R_149 = 11,
+ INFOFRAME_AVI_DB3ITC_NO = 0,
+ INFOFRAME_AVI_DB3ITC_YES = 1,
+ INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
+ INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
+ INFOFRAME_AVI_DB3Q_DEFAULT = 0,
+ INFOFRAME_AVI_DB3Q_LR = 1,
+ INFOFRAME_AVI_DB3Q_FR = 2,
+ INFOFRAME_AVI_DB3SC_NO = 0,
+ INFOFRAME_AVI_DB3SC_HORI = 1,
+ INFOFRAME_AVI_DB3SC_VERT = 2,
+ INFOFRAME_AVI_DB3SC_HORIVERT = 3,
+ INFOFRAME_AVI_DB5PR_NO = 0,
+ INFOFRAME_AVI_DB5PR_2 = 1,
+ INFOFRAME_AVI_DB5PR_3 = 2,
+ INFOFRAME_AVI_DB5PR_4 = 3,
+ INFOFRAME_AVI_DB5PR_5 = 4,
+ INFOFRAME_AVI_DB5PR_6 = 5,
+ INFOFRAME_AVI_DB5PR_7 = 6,
+ INFOFRAME_AVI_DB5PR_8 = 7,
+ INFOFRAME_AVI_DB5PR_9 = 8,
+ INFOFRAME_AVI_DB5PR_10 = 9
+};
+
+struct hdmi_core_infoframe_avi {
+ u8 db1y_rgb_yuv422_yuv444;
+ u8 db1a_active_format_off_on;
+ u8 db1b_no_vert_hori_verthori;
+ u8 db1s_0_1_2;
+ u8 db2c_no_itu601_itu709_extented;
+ u8 db2m_no_43_169;
+ u8 db2r_same_43_169_149;
+ u8 db3itc_no_yes;
+ u8 db3ec_xvyuv601_xvyuv709;
+ u8 db3q_default_lr_fr;
+ u8 db3sc_no_hori_vert_horivert;
+ u8 db4vic_videocode;
+ u8 db5pr_no_2_3_4_5_6_7_8_9_10;
+ u16 db6_7_lineendoftop;
+ u16 db8_9_linestartofbottom;
+ u16 db10_11_pixelendofleft;
+ u16 db12_13_pixelstartofright;
+};
+
+struct hdmi_core_packet_enable_repeat {
+ u32 AudioPacketED;
+ u32 AudioPacketRepeat;
+ u32 AVIInfoFrameED;
+ u32 AVIInfoFrameRepeat;
+ u32 GeneralcontrolPacketED;
+ u32 GeneralcontrolPacketRepeat;
+ u32 GenericPacketED;
+ u32 GenericPacketRepeat;
+};
+
+enum hdmi_stereo_channel {
+ HDMI_STEREO_NOCHANNEL = 0,
+ HDMI_STEREO_ONECHANNELS = 1,
+ HDMI_STEREO_TWOCHANNELS = 2,
+ HDMI_STEREO_THREECHANNELS = 3,
+ HDMI_STEREO_FOURCHANNELS = 4
+};
+
+enum hdmi_cea_code {
+ HDMI_CEA_CODE_00 = 0x0,
+ HDMI_CEA_CODE_01 = 0x1,
+ HDMI_CEA_CODE_02 = 0x2,
+ HDMI_CEA_CODE_03 = 0x3,
+ HDMI_CEA_CODE_04 = 0x4,
+ HDMI_CEA_CODE_05 = 0x5,
+ HDMI_CEA_CODE_06 = 0x6,
+ HDMI_CEA_CODE_07 = 0x7,
+ HDMI_CEA_CODE_08 = 0x8,
+ HDMI_CEA_CODE_09 = 0x9,
+ HDMI_CEA_CODE_0A = 0xA,
+ HDMI_CEA_CODE_0B = 0xB,
+ HDMI_CEA_CODE_0C = 0xC,
+ HDMI_CEA_CODE_0D = 0xD,
+ HDMI_CEA_CODE_0E = 0xE,
+ HDMI_CEA_CODE_0F = 0xF,
+ HDMI_CEA_CODE_10 = 0x10,
+ HDMI_CEA_CODE_11 = 0x11,
+ HDMI_CEA_CODE_12 = 0x12,
+ HDMI_CEA_CODE_13 = 0x13,
+ HDMI_CEA_CODE_14 = 0x14,
+ HDMI_CEA_CODE_15 = 0x15,
+ HDMI_CEA_CODE_16 = 0x16,
+ HDMI_CEA_CODE_17 = 0x17,
+ HDMI_CEA_CODE_18 = 0x18,
+ HDMI_CEA_CODE_19 = 0x19,
+ HDMI_CEA_CODE_1A = 0x1A,
+ HDMI_CEA_CODE_1B = 0x1B,
+ HDMI_CEA_CODE_1C = 0x1C,
+ HDMI_CEA_CODE_1D = 0x1D,
+ HDMI_CEA_CODE_1E = 0x1E,
+ HDMI_CEA_CODE_1F = 0x1F,
+ HDMI_CEA_CODE_20 = 0x20,
+ HDMI_CEA_CODE_21 = 0x21,
+ HDMI_CEA_CODE_22 = 0x22,
+ HDMI_CEA_CODE_23 = 0x23,
+ HDMI_CEA_CODE_24 = 0x24,
+ HDMI_CEA_CODE_25 = 0x25,
+ HDMI_CEA_CODE_26 = 0x26
+};
+
+enum hdmi_iec_format {
+ HDMI_AUDIO_FORMAT_LPCM = 0,
+ HDMI_AUDIO_FORMAT_IEC = 1
+};
+
+enum hdmi_audio_justify {
+ HDMI_AUDIO_JUSTIFY_LEFT = 0,
+ HDMI_AUDIO_JUSTIFY_RIGHT = 1
+};
+
+enum hdmi_sample_order {
+ HDMI_SAMPLE_RIGHT_FIRST = 0,
+ HDMI_SAMPLE_LEFT_FIRST = 1
+};
+
+enum hdmi_sample_perword {
+ HDMI_ONEWORD_ONE_SAMPLE = 0,
+ HDMI_ONEWORD_TWO_SAMPLES = 1
+};
+
+enum hdmi_sample_size {
+ HDMI_SAMPLE_16BITS = 0,
+ HDMI_SAMPLE_24BITS = 1
+};
+
+struct hdmi_audio_format {
+ enum hdmi_stereo_channel stereo_channel_enable;
+ enum hdmi_cea_code audio_channel_location;
+ enum hdmi_iec_format iec;
+ enum hdmi_audio_justify justify;
+ enum hdmi_sample_order left_before;
+ enum hdmi_sample_perword sample_number;
+ enum hdmi_sample_size sample_size;
+};
+
+enum hdmi_dma_irq {
+ HDMI_THRESHOLD_DMA = 0,
+ HDMI_THRESHOLD_IRQ = 1
+};
+
+enum hdmi_block_start_end {
+ HDMI_BLOCK_STARTEND_ON = 0,
+ HDMI_BLOCK_STARTEND_OFF = 1
+};
+
+struct hdmi_audio_dma {
+ u8 dma_transfer;
+ u8 block_size;
+ enum hdmi_dma_irq dma_or_irq;
+ u16 threshold_value;
+ enum hdmi_block_start_end block_start_end;
+};
+
+enum hdmi_packing_mode {
+ HDMI_PACK_10b_RGB_YUV444 = 0,
+ HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
+ HDMI_PACK_20b_YUV422 = 2,
+ HDMI_PACK_ALREADYPACKED = 7
+};
+
+struct hdmi_video_format {
+ enum hdmi_packing_mode packingMode;
+ u32 linePerPanel;
+ u32 pixelPerLine;
+};
+
+struct hdmi_video_interface {
+ int vSyncPolarity;
+ int hSyncPolarity;
+ int interlacing;
+ int timingMode;
+};
+
+struct hdmi_video_timing {
+ u32 horizontalBackPorch;
+ u32 horizontalFrontPorch;
+ u32 horizontalSyncPulse;
+ u32 verticalBackPorch;
+ u32 verticalFrontPorch;
+ u32 verticalSyncPulse;
+};
+
+typedef struct HDMI_Timing_label {
+ u32 pixelPerLine;
+ u32 linePerPanel;
+ u32 horizontalBackPorch;
+ u32 horizontalFrontPorch;
+ u32 horizontalSyncPulse;
+ u32 verticalBackPorch;
+ u32 verticalFrontPorch;
+ u32 verticalSyncPulse;
+ u32 pplclk;
+} HDMI_Timing_t, *pHDMI_Timing_t;
+
+struct hdmi_config {
+ u16 ppl; /* pixel per line */
+ u16 lpp; /* line per panel */
+ u32 pixel_clock;
+ u16 hsw; /* Horizontal synchronization pulse width */
+ u16 hfp; /* Horizontal front porch */
+ u16 hbp; /* Horizontal back porch */
+ u16 vsw; /* Vertical synchronization pulse width */
+ u16 vfp; /* Vertical front porch */
+ u16 vbp; /* Vertical back porch */
+ u16 interlace;
+ u16 h_pol;
+ u16 v_pol;
+ u16 hdmi_dvi;
+ u16 video_format;
+};
+
+enum hdmi_core_if_fs {
+ IF_FS_NO = 0x0,
+ IF_FS_32000 = 0x1,
+ IF_FS_44100 = 0x2,
+ IF_FS_48000 = 0x3,
+ IF_FS_88200 = 0x4,
+ IF_FS_96000 = 0x5,
+ IF_FS_176400 = 0x6,
+ IF_FS_192000 = 0x7
+};
+
+enum hdmi_core_if_sample_size {
+ IF_NO_PER_SAMPLE = 0x0,
+ IF_16BIT_PER_SAMPLE = 0x1,
+ IF_20BIT_PER_SAMPLE = 0x2,
+ IF_24BIT_PER_SAMPLE = 0x3
+};
+
+struct hdmi_core_audio_config {
+ enum hdmi_core_fs fs; /* 0=32KHz - 1=44.1KHz */
+ u32 n;
+ u32 cts;
+ u32 aud_par_busclk;
+ enum hdmi_core_layout layout; /* 0: 2Ch - 1: 8Ch */
+ enum hdmi_core_cts_mode cts_mode; /* 0: HW - 1: SW*/
+ enum hdmi_core_if_fs if_fs;
+ u32 if_channel_number;
+ enum hdmi_core_if_sample_size if_sample_size;
+ enum hdmi_cea_code if_audio_channel_location;
+ };
+
+/* Function prototype */
+int HDMI_W1_StopVideoFrame(u32);
+int HDMI_W1_StartVideoFrame(u32);
+int HDMI_W1_SetWaitPhyPwrState(u32 name, HDMI_PhyPwr_t param);
+int HDMI_W1_SetWaitPllPwrState(u32 name, HDMI_PllPwr_t param);
+int HDMI_W1_SetWaitSoftReset(void);
+int hdmi_w1_wrapper_disable(u32);
+int hdmi_w1_wrapper_enable(u32);
+int hdmi_w1_stop_audio_transfer(u32);
+int hdmi_w1_start_audio_transfer(u32);
+int HDMI_CORE_DDC_READEDID(u32 Core, u8 *data);
+int DSS_HDMI_CONFIG(HDMI_Timing_t timings, u32 video_format, u32 mode);
+
+int hdmi_lib_init(void);
+void hdmi_lib_exit(void);
+
+#endif
+
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 7b7fcbb9b14c..1e0e5a485903 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -375,6 +375,7 @@
#define INT_44XX_GPTIMER11 (47 + IRQ_GIC_START)
#define INT_44XX_GPTIMER12 (95 + IRQ_GIC_START)
#define INT_44XX_SHA1MD5 (51 + IRQ_GIC_START)
+#define INT_44XX_DSS_DSI1_IRQ (53 + IRQ_GIC_START)
#define INT_44XX_I2C1_IRQ (56 + IRQ_GIC_START)
#define INT_44XX_I2C2_IRQ (57 + IRQ_GIC_START)
#define INT_44XX_HDQ_IRQ (58 + IRQ_GIC_START)
@@ -395,6 +396,7 @@
#define INT_44XX_MCBSP4_IRQ_TX (81 + IRQ_GIC_START)
#define INT_44XX_MCBSP4_IRQ_RX (82 + IRQ_GIC_START)
#define INT_44XX_MMC_IRQ (83 + IRQ_GIC_START)
+#define INT_44XX_DSS_DSI2_IRQ (84 + IRQ_GIC_START)
#define INT_44XX_MMC2_IRQ (86 + IRQ_GIC_START)
#define INT_44XX_MCBSP2_IRQ_TX (89 + IRQ_GIC_START)
#define INT_44XX_MCBSP2_IRQ_RX (90 + IRQ_GIC_START)
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index 4f22e5bb7ff7..15ae75771fdc 100644
--- a/arch/arm/plat-omap/include/plat/mcbsp.h
+++ b/arch/arm/plat-omap/include/plat/mcbsp.h
@@ -29,6 +29,7 @@
#include <mach/hardware.h>
#include <plat/clock.h>
+#include <plat/dma.h>
#define OMAP7XX_MCBSP1_BASE 0xfffb1000
#define OMAP7XX_MCBSP2_BASE 0xfffb1800
@@ -56,7 +57,7 @@
#define OMAP44XX_MCBSP1_BASE 0x49022000
#define OMAP44XX_MCBSP2_BASE 0x49024000
#define OMAP44XX_MCBSP3_BASE 0x49026000
-#define OMAP44XX_MCBSP4_BASE 0x48074000
+#define OMAP44XX_MCBSP4_BASE 0x48096000
#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
@@ -265,6 +266,95 @@
#define ENAWAKEUP 0x0004
#define SOFTRST 0x0002
+#define OMAP_MCBSP_WORDLEN_NONE 255
+
+#define OMAP_MCBSP_MASTER 1
+#define OMAP_MCBSP_SLAVE 0
+
+/* McBSP interface operating mode */
+#define OMAP_MCBSP_MASTER 1
+#define OMAP_MCBSP_SLAVE 0
+#define OMAP_MCBSP_AUTO_RST_NONE (0x0)
+#define OMAP_MCBSP_AUTO_RRST (0x1<<1)
+#define OMAP_MCBSP_AUTO_XRST (0x1<<2)
+
+/* SRG ENABLE/DISABLE state */
+#define OMAP_MCBSP_ENABLE_FSG_SRG 1
+#define OMAP_MCBSP_DISABLE_FSG_SRG 2
+/* mono to mono mode*/
+#define OMAP_MCBSP_SKIP_NONE (0x0)
+/* mono to stereo mode */
+#define OMAP_MCBSP_SKIP_FIRST (0x1<<1)
+#define OMAP_MCBSP_SKIP_SECOND (0x1<<2)
+/* RRST STATE */
+#define OMAP_MCBSP_RRST_DISABLE 0
+#define OMAP_MCBSP_RRST_ENABLE 1
+/*XRST STATE */
+#define OMAP_MCBSP_XRST_DISABLE 0
+#define OMAP_MCBSP_XRST_ENABLE 1
+
+#define OMAP_MCBSP_FRAME_SINGLEPHASE 1
+#define OMAP_MCBSP_FRAME_DUALPHASE 2
+
+/* Sample Rate Generator Clock source */
+#define OMAP_MCBSP_SRGCLKSRC_CLKS 1
+#define OMAP_MCBSP_SRGCLKSRC_FCLK 2
+#define OMAP_MCBSP_SRGCLKSRC_CLKR 3
+#define OMAP_MCBSP_SRGCLKSRC_CLKX 4
+/* SRG input clock polarity */
+#define OMAP_MCBSP_CLKS_POLARITY_RISING 1
+#define OMAP_MCBSP_CLKS_POLARITY_FALLING 2
+
+#define OMAP_MCBSP_CLKX_POLARITY_RISING 1
+#define OMAP_MCBSP_CLKX_POLARITY_FALLING 2
+
+#define OMAP_MCBSP_CLKR_POLARITY_RISING 1
+#define OMAP_MCBSP_CLKR_POLARITY_FALLING 2
+
+/* SRG Clock synchronization mode */
+#define OMAP_MCBSP_SRG_FREERUNNING 1
+#define OMAP_MCBSP_SRG_RUNNING 2
+
+/* Frame Sync Source */
+#define OMAP_MCBSP_TXFSYNC_EXTERNAL 0
+#define OMAP_MCBSP_TXFSYNC_INTERNAL 1
+
+#define OMAP_MCBSP_RXFSYNC_EXTERNAL 0
+#define OMAP_MCBSP_RXFSYNC_INTERNAL 1
+
+#define OMAP_MCBSP_CLKRXSRC_EXTERNAL 1
+#define OMAP_MCBSP_CLKRXSRC_INTERNAL 2
+
+#define OMAP_MCBSP_CLKTXSRC_EXTERNAL 1
+#define OMAP_MCBSP_CLKTXSRC_INTERNAL 2
+
+/* Justification */
+#define OMAP_MCBSP_RJUST_ZEROMSB 0
+#define OMAP_MCBSP_RJUST_SIGNMSB 1
+#define OMAP_MCBSP_LJUST_ZEROLSB 2
+
+#define OMAP_MCBSP_DATADELAY0 0
+#define OMAP_MCBSP_DATADELAY1 1
+#define OMAP_MCBSP_DATADELAY2 2
+
+/* Reverse mode for 243X and 34XX */
+#define OMAP_MCBSP_MSBFIRST 0
+#define OMAP_MCBSP_LSBFIRST 1
+
+/* Multi-Channel partition mode */
+#define OMAP_MCBSP_TWOPARTITION_MODE 0
+#define OMAP_MCBSP_EIGHTPARTITION_MODE 1
+
+/* Rx Multichannel selection */
+#define OMAP_MCBSP_RXMUTICH_DISABLE 0
+#define OMAP_MCBSP_RXMUTICH_ENABLE 1
+
+/* Tx Multichannel selection */
+#define OMAP_MCBSP_TXMUTICH_DISABLE 0
+#define OMAP_MCBSP_TXMUTICH_ENABLE 1
+
+#define OMAP_MCBSP_FRAMELEN_N(NUM_WORDS) ((NUM_WORDS - 1) & 0x7F)
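+/*
+ * e.g. OMAP_MCBSP_FRAMELEN_N(16) == 15; the frame length field is
+ * programmed as (words per frame - 1).
+ */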
+
/********************** McBSP DMA operating modes **************************/
#define MCBSP_DMA_MODE_ELEMENT 0
#define MCBSP_DMA_MODE_THRESHOLD 1
@@ -321,6 +411,22 @@ typedef enum {
typedef int __bitwise omap_mcbsp_io_type_t;
#define OMAP_MCBSP_IRQ_IO ((__force omap_mcbsp_io_type_t) 1)
#define OMAP_MCBSP_POLL_IO ((__force omap_mcbsp_io_type_t) 2)
+#define omap_mcbsp_check_valid_id(id) (id < omap_mcbsp_count)
+#define id_to_mcbsp_ptr(id) mcbsp_ptr[id];
+
+typedef void (*omap_mcbsp_dma_cb) (u32 ch_status, void *arg);
+
+typedef struct omap_mcbsp_dma_transfer_parameters {
+
+ /* Skip the alternate element; used for stereo mode */
+ u8 skip_alt;
+ /* Automagically handle Transfer [XR]RST? */
+ u8 auto_reset;
+ /* callback function executed for every tx/rx completion */
+ omap_mcbsp_dma_cb callback;
+ /* word length of data */
+ u32 word_length1;
+} omap_mcbsp_dma_transfer_params;
typedef enum {
OMAP_MCBSP_WORD_8 = 0,
@@ -410,12 +516,65 @@ struct omap_mcbsp {
struct omap_mcbsp_platform_data *pdata;
struct clk *iclk;
struct clk *fclk;
+
+ u8 auto_reset; /* Auto Reset */
+ u8 txskip_alt; /* Tx skip flags */
+ u8 rxskip_alt; /* Rx skip flags */
+ void *rx_cb_arg;
+ void *tx_cb_arg;
+ omap_mcbsp_dma_cb rx_callback;
+ omap_mcbsp_dma_cb tx_callback;
+ int rx_dma_chain_state;
+ int tx_dma_chain_state;
+ int interface_mode; /* Master / Slave */
+ struct omap_dma_channel_params rx_params; /* Used For Rx FIFO */
+ int rx_config_done;
+
#ifdef CONFIG_ARCH_OMAP34XX
int dma_op_mode;
u16 max_tx_thres;
u16 max_rx_thres;
#endif
};
+
+struct omap_mcbsp_dma_transfer_params {
+ /* Skip the alternate element; used for stereo mode */
+ u8 skip_alt;
+ /* Automagically handle Transfer [XR]RST? */
+ u8 auto_reset;
+ /* callback function executed for every tx/rx completion */
+ omap_mcbsp_dma_cb callback;
+ /* word length of data */
+ u32 word_length1;
+};
+
+struct omap_mcbsp_cfg_param {
+ u8 fsync_src;
+ u8 fs_polarity;
+ u8 clk_polarity;
+ u8 clk_mode;
+ u8 frame_length1;
+ u8 frame_length2;
+ u8 word_length1;
+ u8 word_length2;
+ u8 justification;
+ u8 reverse_compand;
+ u8 phase;
+ u8 data_delay;
+};
+
+struct omap_mcbsp_srg_fsg_cfg {
+ u32 period; /* Frame period */
+ u32 pulse_width; /* Frame width */
+ u8 fsgm;
+ u32 sample_rate;
+ u32 bits_per_sample;
+ u32 srg_src;
+ u8 sync_mode; /* SRG free running mode */
+ u8 polarity;
+ u8 dlb; /* digital loopback mode */
+};
+
extern struct omap_mcbsp **mcbsp_ptr;
extern int omap_mcbsp_count;
@@ -450,6 +609,30 @@ int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int leng
int omap_mcbsp_spi_master_xmit_word_poll(unsigned int id, u32 word);
int omap_mcbsp_spi_master_recv_word_poll(unsigned int id, u32 * word);
+int omap2_mcbsp_stop_datatx(unsigned int id);
+int omap2_mcbsp_stop_datarx(u32 id);
+int omap2_mcbsp_reset(unsigned int id);
+int omap2_mcbsp_transmitter_index(int id, int *ei, int *fi);
+int omap2_mcbsp_receiver_index(int id, int *ei, int *fi);
+extern int omap2_mcbsp_set_xrst(unsigned int id, u8 state);
+extern int omap2_mcbsp_set_rrst(unsigned int id, u8 state);
+extern int omap2_mcbsp_dma_recv_params(unsigned int id,
+ struct omap_mcbsp_dma_transfer_params *rp);
+extern int omap2_mcbsp_dma_trans_params(unsigned int id,
+ struct omap_mcbsp_dma_transfer_params *tp);
+extern int omap2_mcbsp_receive_data(unsigned int id, void *cbdata,
+ dma_addr_t buf_start_addr, u32 buf_size);
+extern int omap2_mcbsp_send_data(unsigned int id, void *cbdata,
+ dma_addr_t buf_start_addr, u32 buf_size);
+
+extern void omap_mcbsp_write(void __iomem *io_base, u16 reg, u32 val);
+extern int omap_mcbsp_read(void __iomem *io_base, u16 reg);
+
+extern void omap2_mcbsp_params_cfg(unsigned int id, int interface_mode,
+ struct omap_mcbsp_cfg_param *rp,
+ struct omap_mcbsp_cfg_param *tp,
+ struct omap_mcbsp_srg_fsg_cfg *param);
+
/* SPI specific API */
void omap_mcbsp_set_spi_mode(unsigned int id, const struct omap_mcbsp_spi_cfg * spi_cfg);
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 29937137bf3e..3b1e964cba73 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -17,6 +17,10 @@
#include <plat/board.h>
+#ifdef CONFIG_TIWLAN_SDIO
+#include <linux/mmc/card.h>
+#endif
+
#define OMAP15XX_NR_MMC 1
#define OMAP16XX_NR_MMC 2
#define OMAP1_MMC_SIZE 0x080
@@ -43,6 +47,13 @@
#define OMAP_MMC_MAX_SLOTS 2
+#ifdef CONFIG_TIWLAN_SDIO
+struct embedded_sdio_data {
+struct sdio_cis cis;
+struct sdio_embedded_func *funcs;
+};
+#endif
+
struct omap_mmc_platform_data {
/* back-link to device */
struct device *dev;
@@ -122,6 +133,13 @@ struct omap_mmc_platform_data {
unsigned int ban_openended:1;
+#ifdef CONFIG_TIWLAN_SDIO
+ struct embedded_sdio_data *embedded_sdio;
+ int (*register_status_notify)
+ (void (*callback)(int card_present, void *dev_id),
+ void *dev_id);
+#endif
+
} slots[OMAP_MMC_MAX_SLOTS];
};
diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/arch/arm/plat-omap/include/plat/omap-serial.h
new file mode 100644
index 000000000000..2a1e7f98b035
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/omap-serial.h
@@ -0,0 +1,128 @@
+/*
+ * Driver for OMAP-UART controller.
+ * Based on drivers/serial/8250.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Authors:
+ * Govindraj R <govindraj.raja@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __OMAP_SERIAL_H__
+#define __OMAP_SERIAL_H__
+
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+
+#include <plat/control.h>
+#include <plat/mux.h>
+
+#define DRIVER_NAME "omap-hsuart"
+
+/*
+ * Use ttyO as the tty device name [O -> OMAP].
+ * In bootargs, specify console=ttyO0 if UART1
+ * is used as the console UART.
+ */
+#define OMAP_SERIAL_NAME "ttyO"
+
+#define OMAP_MDR1_DISABLE 0x07
+#define OMAP_MDR1_MODE13X 0x03
+#define OMAP_MDR1_MODE16X 0x00
+#define OMAP_MODE13X_SPEED 230400
+
+/*
+ * LCR = 0XBF: Switch to Configuration Mode B.
+ * Configuration mode B allows access
+ * to EFR, DLL and DLH.
+ * Reference OMAP TRM Chapter 17
+ * Section: 1.4.3 Mode Selection
+ */
+#define OMAP_UART_LCR_CONF_MDB 0XBF
+
+/* WER = 0x7F
+ * Enable module level wakeup in WER reg
+ */
+#define OMAP_UART_WER_MOD_WKUP 0X7F
+
+/* Enable XON/XOFF flow control on output */
+#define OMAP_UART_SW_TX 0x04
+
+/* Enable XON/XOFF flow control on input */
+#define OMAP_UART_SW_RX 0x04
+
+#define OMAP_UART_SYSC_RESET 0X07
+#define OMAP_UART_TCR_TRIG 0X0F
+#define OMAP_UART_SW_CLR 0XF0
+
+#define OMAP_UART_DMA_CH_FREE -1
+
+#define RX_TIMEOUT (3 * HZ)
+#define OMAP_MAX_HSUART_PORTS 4
+
+#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+
+struct omap_uart_port_info {
+ bool dma_enabled; /* To specify DMA Mode */
+ unsigned int uartclk; /* UART clock rate */
+ void __iomem *membase; /* ioremap cookie or NULL */
+ resource_size_t mapbase; /* resource base */
+ unsigned long irqflags; /* request_irq flags */
+ upf_t flags; /* UPF_* flags */
+};
+
+struct uart_omap_dma {
+ u8 uart_dma_tx;
+ u8 uart_dma_rx;
+ int rx_dma_channel;
+ int tx_dma_channel;
+ dma_addr_t rx_buf_dma_phys;
+ dma_addr_t tx_buf_dma_phys;
+ unsigned int uart_base;
+ /*
+ * Buffer for RX DMA. It is not required for TX because the buffer
+ * comes from the port structure.
+ */
+ unsigned char *rx_buf;
+ unsigned int prev_rx_dma_pos;
+ int tx_buf_size;
+ int tx_dma_used;
+ int rx_dma_used;
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+ /* timer to poll activity on rx dma */
+ struct timer_list rx_timer;
+ int rx_buf_size;
+ int rx_timeout;
+};
+
+struct uart_omap_port {
+ struct uart_port port;
+ struct uart_omap_dma uart_dma;
+ struct platform_device *pdev;
+
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned char fcr;
+ unsigned char efr;
+
+ int use_dma;
+ /*
+ * Some bits in registers are cleared on a read, so they must
+ * be saved whenever the register is read but the bits will not
+ * be immediately processed.
+ */
+ unsigned int lsr_break_flag;
+ unsigned char msr_saved_flags;
+ char name[20];
+ unsigned long port_activity;
+};
+
+#endif /* __OMAP_SERIAL_H__ */
diff --git a/arch/arm/plat-omap/include/plat/wifi_tiwlan.h b/arch/arm/plat-omap/include/plat/wifi_tiwlan.h
new file mode 100755
index 000000000000..b0332b04ddc9
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/wifi_tiwlan.h
@@ -0,0 +1,23 @@
+/* mach/wifi_tiwlan.h
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WIFI_TIWLAN_H_
+#define _LINUX_WIFI_TIWLAN_H_
+
+struct wifi_platform_data {
+ int (*set_power)(int val);
+ int (*set_reset)(int val);
+ int (*set_carddetect)(int val);
+ void *(*mem_prealloc)(int section, unsigned long size);
+};
+
+#endif
diff --git a/arch/arm/plat-omap/include/syslink/GlobalTypes.h b/arch/arm/plat-omap/include/syslink/GlobalTypes.h
new file mode 100644
index 000000000000..6cd959cde952
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/GlobalTypes.h
@@ -0,0 +1,154 @@
+/*
+ * GlobalTypes.h
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef __GLOBALTYPES_H
+#define __GLOBALTYPES_H
+
+#define REG volatile
+
+/*
+ * Definition: RET_CODE_BASE
+ *
+ * DESCRIPTION: Base value for return code offsets
+ *
+ *
+ */
+#define RET_CODE_BASE 0
+
+/*
+ * TYPE: ReturnCode_t
+ *
+ * DESCRIPTION: Return codes to be returned by all library functions
+ *
+ *
+ */
+enum ReturnCode_label {
+RET_OK = 0,
+RET_FAIL = -1,
+RET_BAD_NULL_PARAM = -2,
+RET_PARAM_OUT_OF_RANGE = -3,
+RET_INVALID_ID = -4,
+RET_EMPTY = -5,
+RET_FULL = -6,
+RET_TIMEOUT = -7,
+RET_INVALID_OPERATION = -8,
+/* Add new error codes at end of above list */
+RET_NUM_RET_CODES /* this should ALWAYS be LAST entry */
+};
+
+
+
+/*
+ * MACRO: RD_MEM_32_VOLATILE, WR_MEM_32_VOLATILE
+ *
+ * DESCRIPTION: 32 bit register access macros
+ *
+ *
+ */
+#define RD_MEM_32_VOLATILE(addr) \
+((unsigned long)(*((REG unsigned long *)(addr))))
+
+#define WR_MEM_32_VOLATILE(addr, data) \
+(*((REG unsigned long *)(addr)) = (unsigned long)(data))
+
+
+
+
+#ifdef CHECK_INPUT_PARAMS
+/*
+ * MACRO: CHECK_INPUT_PARAMS
+ *
+ * DESCRIPTION: Checks an input value and returns a specified code if the
+ * value is invalid; also writes a spy value if an error is found.
+ *
+ * NOTE: Can be disabled to save HW cycles.
+ *
+ *
+ */
+#define CHECK_INPUT_PARAM(actualValue, invalidValue, \
+returnCodeIfMismatch, spyCodeIfMisMatch) do {\
+ if ((invalidValue) == (actualValue)) {\
+ RES_Set((spyCodeIfMisMatch));\
+ return returnCodeIfMismatch; \
+ } \
+} while (0)
+
+/*
+ * MACRO: CHECK_INPUT_RANGE
+ *
+ * DESCRIPTION: Checks an input value and returns a specified code if it is
+ * not in the specified range; also writes a spy value if an error is found.
+ *
+ * NOTE: Can be disabled to save HW cycles.
+ *
+ *
+ */
+#define CHECK_INPUT_RANGE(actualValue, minValidValue, maxValidValue, \
+returnCodeIfMismatch, spyCodeIfMisMatch) do {\
+ if (((actualValue) < (minValidValue)) || \
+ ((actualValue) > (maxValidValue))) {\
+ RES_Set((spyCodeIfMisMatch));\
+ return returnCodeIfMismatch; \
+ } \
+} while (0)
+
+/*
+ * MACRO: CHECK_INPUT_RANGE_MIN0
+ *
+ * DESCRIPTION: Checks an input value and returns a specified code if it is
+ * not in the specified range; also writes a spy value if an error is found.
+ * The minimum valid value is 0.
+ *
+ * NOTE: Can be disabled to save HW cycles.
+ *
+ *
+ */
+#define CHECK_INPUT_RANGE_MIN0(actualValue, maxValidValue, \
+returnCodeIfMismatch, spyCodeIfMisMatch) do {\
+ if ((actualValue) > (maxValidValue)) {\
+ RES_Set((spyCodeIfMisMatch));\
+ return returnCodeIfMismatch; \
+ } \
+} while (0)
+
+#else
+
+#define CHECK_INPUT_PARAM(actualValue, invalidValue, returnCodeIfMismatch,\
+spyCodeIfMisMatch)
+
+#define CHECK_INPUT_PARAM_NO_SPY(actualValue, invalidValue, \
+returnCodeIfMismatch)
+
+#define CHECK_INPUT_RANGE(actualValue, minValidValue, maxValidValue, \
+returnCodeIfMismatch, spyCodeIfMisMatch)
+
+#define CHECK_INPUT_RANGE_NO_SPY(actualValue, minValidValue , \
+maxValidValue, returnCodeIfMismatch)
+
+#define CHECK_INPUT_RANGE_MIN0(actualValue, maxValidValue, \
+returnCodeIfMismatch, spyCodeIfMisMatch)
+
+#define CHECK_INPUT_RANGE_NO_SPY_MIN0(actualValue, \
+maxValidValue, returnCodeIfMismatch)
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __GLOBALTYPES_H */
diff --git a/arch/arm/plat-omap/include/syslink/MBXAccInt.h b/arch/arm/plat-omap/include/syslink/MBXAccInt.h
new file mode 100644
index 000000000000..84e333d0d5da
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/MBXAccInt.h
@@ -0,0 +1,550 @@
+/*
+ * MBXAccInt.h
+ *
+ * Notify mailbox driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#ifndef _MLB_ACC_INT_H
+#define _MLB_ACC_INT_H
+
+
+/*
+ * EXPORTED DEFINITIONS
+ *
+ */
+
+
+#define MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET (unsigned long)(0x0040)
+#define MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP (unsigned long)(0x0004)
+#define MLB_MAILBOX_MESSAGE___REGSET_0_15_BANKS (unsigned long)(16)
+
+/* Register offset address definitions relative
+to register set MAILBOX_MESSAGE___REGSET_0_15 */
+
+#define MLB_MAILBOX_MESSAGE___0_15_OFFSET (unsigned long)(0x0)
+
+
+/* Register set MAILBOX_FIFOSTATUS___REGSET_0_15
+address offset, bank address increment and number of banks */
+
+#define MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET (unsigned long)(0x0080)
+#define MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP (unsigned long)(0x0004)
+#define MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_BANKS (unsigned long)(16)
+
+/* Register offset address definitions relative to
+register set MAILBOX_FIFOSTATUS___REGSET_0_15 */
+
+#define MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET (unsigned long)(0x0)
+
+
+/* Register set MAILBOX_MSGSTATUS___REGSET_0_15
+address offset, bank address increment and number of banks */
+
+#define MLB_MAILBOX_MSGSTATUS___REGSET_0_15_OFFSET (unsigned long)(0x00c0)
+#define MLB_MAILBOX_MSGSTATUS___REGSET_0_15_STEP (unsigned long)(0x0004)
+#define MLB_MAILBOX_MSGSTATUS___REGSET_0_15_BANKS (unsigned long)(16)
+
+/* Register offset address definitions relative to
+register set MAILBOX_MSGSTATUS___REGSET_0_15 */
+
+#define MLB_MAILBOX_MSGSTATUS___0_15_OFFSET (unsigned long)(0x0)
+
+
+/*Register set MAILBOX_IRQSTATUS___REGSET_0_3 address
+offset, bank address increment and number of banks */
+
+#define MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET (unsigned long)(0x0100)
+#define MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP (unsigned long)(0x0008)
+#define MLB_MAILBOX_IRQSTATUS___REGSET_0_3_BANKS (unsigned long)(4)
+
+#define MLB_MAILBOX_IRQSTATUS_CLR___REGSET_0_3_OFFSET (unsigned long)(0x0104)
+#define MLB_MAILBOX_IRQSTATUS_CLR___REGSET_0_3_STEP (unsigned long)(0x0010)
+#define MLB_MAILBOX_IRQSTATUS_CLR___REGSET_0_3_BANKS (unsigned long)(4)
+
+/* Register offset address definitions relative to
+register set MAILBOX_IRQSTATUS___REGSET_0_3 */
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_OFFSET (unsigned long)(0x0)
+
+
+/* Register set MAILBOX_IRQENABLE___REGSET_0_3
+address offset, bank address increment and number of banks */
+
+#define MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET (unsigned long)(0x0104)
+#define MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP (unsigned long)(0x0008)
+#define MLB_MAILBOX_IRQENABLE___REGSET_0_3_BANKS (unsigned long)(4)
+
+#define MLB_MAILBOX_IRQENABLE_SET___REGSET_0_3_OFFSET (unsigned long)(0x0108)
+#define MLB_MAILBOX_IRQENABLE_SET___REGSET_0_3_STEP (unsigned long)(0x0010)
+#define MLB_MAILBOX_IRQENABLE_SET___REGSET_0_3_BANKS (unsigned long)(4)
+
+#define MLB_MAILBOX_IRQENABLE_CLR___REGSET_0_3_OFFSET (unsigned long)(0x010C)
+#define MLB_MAILBOX_IRQENABLE_CLR___REGSET_0_3_STEP (unsigned long)(0x0010)
+#define MLB_MAILBOX_IRQENABLE_CLR___REGSET_0_3_BANKS (unsigned long)(4)
+
+/* Register offset address definitions relative to register
+set MAILBOX_IRQENABLE___REGSET_0_3 */
+
+#define MLB_MAILBOX_IRQENABLE___0_3_OFFSET (unsigned long)(0x0)
+
+
+/* Register offset address definitions */
+
+#define MLB_MAILBOX_REVISION_OFFSET (unsigned long)(0x0)
+#define MLB_MAILBOX_SYSCONFIG_OFFSET (unsigned long)(0x10)
+#define MLB_MAILBOX_SYSSTATUS_OFFSET (unsigned long)(0x14)
+
+
+/* Bitfield mask and offset declarations */
+
+#define MLB_MAILBOX_REVISION_Rev_MASK (unsigned long)(0xff)
+#define MLB_MAILBOX_REVISION_Rev_OFFSET (unsigned long)(0)
+
+#define MLB_MAILBOX_SYSCONFIG_ClockActivity_MASK (unsigned long)(0x100)
+#define MLB_MAILBOX_SYSCONFIG_ClockActivity_OFFSET (unsigned long)(8)
+
+#define MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK (unsigned long)(0x18)
+#define MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET (unsigned long)(3)
+
+#define MLB_MAILBOX_SYSCONFIG_SoftReset_MASK (unsigned long)(0x2)
+#define MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET (unsigned long)(1)
+
+#define MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK (unsigned long)(0x1)
+#define MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET (unsigned long)(0)
+
+#define MLB_MAILBOX_SYSSTATUS_ResetDone_MASK (unsigned long)(0x1)
+#define MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET (unsigned long)(0)
+
+#define MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_MASK \
+(unsigned long)(0xffffffff)
+
+#define MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_OFFSET (unsigned long)(0)
+
+#define MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_MASK (unsigned long)(0x1)
+#define MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_OFFSET (unsigned long)(0)
+
+#define MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_MASK (unsigned long)(0x7f)
+#define MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_OFFSET (unsigned long)(0)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_MASK \
+(unsigned long)(0x80000000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_OFFSET \
+(unsigned long)(31)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_MASK \
+(unsigned long)(0x40000000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_OFFSET \
+(unsigned long)(30)
+
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_MASK \
+(unsigned long)(0x20000000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_OFFSET \
+(unsigned long)(29)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_MASK \
+(unsigned long)(0x10000000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_OFFSET \
+(unsigned long)(28)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_MASK \
+(unsigned long)(0x8000000)
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_OFFSET \
+(unsigned long)(27)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_MASK \
+(unsigned long)(0x4000000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_OFFSET \
+(unsigned long)(26)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_MASK \
+(unsigned long)(0x2000000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_OFFSET \
+(unsigned long)(25)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_MASK \
+(unsigned long)(0x1000000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_OFFSET \
+(unsigned long)(24)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_MASK \
+(unsigned long)(0x800000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_OFFSET \
+(unsigned long)(23)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_MASK \
+(unsigned long)(0x400000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_OFFSET \
+(unsigned long)(22)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_MASK \
+(unsigned long)(0x200000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_OFFSET \
+(unsigned long)(21)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_MASK \
+(unsigned long)(0x100000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_OFFSET \
+(unsigned long)(20)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_MASK \
+(unsigned long)(0x80000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_OFFSET \
+(unsigned long)(19)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_MASK \
+(unsigned long)(0x40000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_OFFSET \
+(unsigned long)(18)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_MASK \
+(unsigned long)(0x20000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_OFFSET \
+(unsigned long)(17)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_MASK \
+(unsigned long)(0x10000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_OFFSET \
+(unsigned long)(16)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_MASK \
+(unsigned long)(0x8000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_OFFSET \
+(unsigned long)(15)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_MASK \
+(unsigned long)(0x4000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_OFFSET \
+(unsigned long)(14)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_MASK \
+(unsigned long)(0x2000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_OFFSET \
+(unsigned long)(13)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_MASK \
+(unsigned long)(0x1000)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_OFFSET \
+(unsigned long)(12)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_MASK \
+(unsigned long)(0x800)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_OFFSET \
+(unsigned long)(11)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_MASK \
+(unsigned long)(0x400)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_OFFSET \
+(unsigned long)(10)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_MASK \
+(unsigned long)(0x200)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_OFFSET \
+(unsigned long)(9)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_MASK \
+(unsigned long)(0x100)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_OFFSET \
+(unsigned long)(8)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_MASK \
+(unsigned long)(0x80)
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_OFFSET \
+(unsigned long)(7)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_MASK \
+(unsigned long)(0x40)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_OFFSET \
+(unsigned long)(6)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_MASK \
+(unsigned long)(0x20)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_OFFSET \
+(unsigned long)(5)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_MASK \
+(unsigned long)(0x10)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_OFFSET \
+(unsigned long)(4)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_MASK \
+(unsigned long)(0x8)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_OFFSET \
+(unsigned long)(3)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_MASK \
+(unsigned long)(0x4)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_OFFSET \
+(unsigned long)(2)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_MASK \
+(unsigned long)(0x2)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_OFFSET \
+(unsigned long)(1)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_MASK \
+(unsigned long)(0x1)
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_OFFSET \
+(unsigned long)(0)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_MASK \
+(unsigned long)(0x80000000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_OFFSET \
+(unsigned long)(31)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_MASK \
+(unsigned long)(0x40000000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_OFFSET \
+(unsigned long)(30)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_MASK \
+(unsigned long)(0x20000000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_OFFSET \
+(unsigned long)(29)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_MASK \
+(unsigned long)(0x10000000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_OFFSET \
+(unsigned long)(28)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_MASK \
+(unsigned long)(0x8000000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_OFFSET \
+(unsigned long)(27)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_MASK \
+(unsigned long)(0x4000000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_OFFSET \
+(unsigned long)(26)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_MASK \
+(unsigned long)(0x2000000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_OFFSET \
+(unsigned long)(25)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_MASK \
+(unsigned long)(0x1000000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_OFFSET \
+(unsigned long)(24)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_MASK \
+(unsigned long)(0x800000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_OFFSET \
+(unsigned long)(23)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_MASK \
+(unsigned long)(0x400000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_OFFSET \
+(unsigned long)(22)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_MASK \
+(unsigned long)(0x200000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_OFFSET \
+(unsigned long)(21)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_MASK \
+(unsigned long)(0x100000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_OFFSET \
+(unsigned long)(20)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_MASK \
+(unsigned long)(0x80000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_OFFSET \
+(unsigned long)(19)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_MASK \
+(unsigned long)(0x40000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_OFFSET \
+(unsigned long)(18)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_MASK \
+(unsigned long)(0x20000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_OFFSET \
+(unsigned long)(17)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_MASK \
+(unsigned long)(0x10000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_OFFSET \
+(unsigned long)(16)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_MASK \
+(unsigned long)(0x8000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_OFFSET \
+(unsigned long)(15)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_MASK \
+(unsigned long)(0x4000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_OFFSET \
+(unsigned long)(14)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_MASK \
+(unsigned long)(0x2000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_OFFSET \
+(unsigned long)(13)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_MASK \
+(unsigned long)(0x1000)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_OFFSET \
+(unsigned long)(12)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_MASK \
+(unsigned long)(0x800)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_OFFSET \
+(unsigned long)(11)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_MASK \
+(unsigned long)(0x400)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_OFFSET \
+(unsigned long)(10)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_MASK \
+(unsigned long)(0x200)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_OFFSET \
+(unsigned long)(9)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_MASK \
+(unsigned long)(0x100)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_OFFSET \
+(unsigned long)(8)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_MASK \
+(unsigned long)(0x80)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_OFFSET \
+(unsigned long)(7)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_MASK \
+(unsigned long)(0x40)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_OFFSET \
+(unsigned long)(6)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_MASK \
+(unsigned long)(0x20)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_OFFSET \
+(unsigned long)(5)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_MASK \
+(unsigned long)(0x10)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_OFFSET \
+(unsigned long)(4)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_MASK \
+(unsigned long)(0x8)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_OFFSET \
+(unsigned long)(3)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_MASK \
+(unsigned long)(0x4)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_OFFSET \
+(unsigned long)(2)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_MASK \
+(unsigned long)(0x2)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_OFFSET \
+(unsigned long)(1)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_MASK \
+(unsigned long)(0x1)
+
+#define MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_OFFSET \
+(unsigned long)(0)
+
+
+/*
+ * EXPORTED TYPES
+ */
+
+/*
+ * The following type definitions represent the enumerated
+ * values for each bitfield.
+ */
+
+enum MLBMAILBOX_SYSCONFIGSIdleModeE {
+ MLBMAILBOX_SYSCONFIGSIdleModeb00 = 0x0000,
+ MLBMAILBOX_SYSCONFIGSIdleModeb01 = 0x0001,
+ MLBMAILBOX_SYSCONFIGSIdleModeb10 = 0x0002,
+ MLBMAILBOX_SYSCONFIGSIdleModeb11 = 0x0003
+};
+
+enum MLBMAILBOX_SYSCONFIGSoftResetE {
+ MLBMAILBOX_SYSCONFIGSoftResetb0 = 0x0000,
+ MLBMAILBOX_SYSCONFIGSoftResetb1 = 0x0001
+};
+
+enum MLBMAILBOX_SYSCONFIGAutoIdleE {
+ MLBMAILBOX_SYSCONFIGAutoIdleb0 = 0x0000,
+ MLBMAILBOX_SYSCONFIGAutoIdleb1 = 0x0001
+};
+
+enum MLBMAILBOX_SYSSTATUSResetDoneE {
+ MLBMAILBOX_SYSSTATUSResetDonerstongoing = 0x0000,
+ MLBMAILBOX_SYSSTATUSResetDonerstcomp = 0x0001
+};
+
+#endif /* _MLB_ACC_INT_H */
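For reference, the *_MASK / *_OFFSET pairs defined above are meant to be used together: AND the register value with the mask, then shift right by the offset to extract the field (or shift left and mask when building a value). A minimal sketch, assuming a raw MAILBOX_IRQSTATUS_0_3 word has already been read from the mailbox block; the helper name is illustrative and not part of the patch:

/* Illustrative only: extract the "new message" status bit of mailbox 0
 * from a raw MAILBOX_IRQSTATUS_0_3 value using the pairs defined above. */
static inline unsigned long mbox0_new_msg_pending(unsigned long irqstatus)
{
	return (irqstatus & MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_MASK)
		>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_OFFSET;
}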
diff --git a/arch/arm/plat-omap/include/syslink/MBXRegAcM.h b/arch/arm/plat-omap/include/syslink/MBXRegAcM.h
new file mode 100644
index 000000000000..1c6732ecc9a2
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/MBXRegAcM.h
@@ -0,0 +1,3027 @@
+/*
+ * MBXRegAcM.h
+ *
+ * Notify mailbox driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MBX_REG_ACM_H
+#define _MBX_REG_ACM_H
+
+
+#include <syslink/GlobalTypes.h>
+#include <syslink/MBXAccInt.h>
+
+
+/*
+ * EXPORTED DEFINITIONS
+ */
+
+#if defined(USE_LEVEL_1_MACROS)
+
+#define MLBMAILBOX_REVISIONReadRegister32(base_address)\
+(RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+MLB_MAILBOX_REVISION_OFFSET))
+
+#define MLBMAILBOX_REVISIONRevRead32(base_address)\
+((((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+ \
+(MLB_MAILBOX_REVISION_OFFSET)))) &\
+MLB_MAILBOX_REVISION_Rev_MASK) >>\
+MLB_MAILBOX_REVISION_Rev_OFFSET))
+
+#define MLBMAILBOX_REVISIONRevGet32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_REVISION_Rev_MASK) \
+>> MLB_MAILBOX_REVISION_Rev_OFFSET))
+
+#define MLBMAILBOX_SYSCONFIGReadRegister32(base_address)\
+(RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+MLB_MAILBOX_SYSCONFIG_OFFSET))
+
+
+#define MLBMAILBOX_SYSCONFIGWriteRegister32(base_address, value)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ register unsigned long newValue = ((unsigned long)(value));\
+ WR_MEM_32_VOLATILE(((unsigned long)(base_address))+ \
+ offset, newValue);\
+} while (0)
+
+
+#define MLBMAILBOX_SYSCONFIGClockActivityRead32(base_address)\
+((((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_ClockActivity_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_ClockActivity_OFFSET))
+
+
+#define MLBMAILBOX_SYSCONFIGClockActivityGet32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_SYSCONFIG_ClockActivity_MASK) \
+>> MLB_MAILBOX_SYSCONFIG_ClockActivity_OFFSET))
+
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeRead32(base_address)\
+((((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET))
+
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeReadIsb0032(base_address)\
+((MLBMAILBOX_SYSCONFIGSIdleModeb00 == (MLBMAILBOX_SYSCONFIGSIdleModeE)\
+(((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET)))
+
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeReadIsb0132(base_address)\
+((MLBMAILBOX_SYSCONFIGSIdleModeb01 == (MLBMAILBOX_SYSCONFIGSIdleModeE)\
+(((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET)))
+
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeReadIsb1032(base_address)\
+((MLBMAILBOX_SYSCONFIGSIdleModeb10 == (MLBMAILBOX_SYSCONFIGSIdleModeE)\
+(((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeReadIsb1132(base_address)\
+((MLBMAILBOX_SYSCONFIGSIdleModeb11 == (MLBMAILBOX_SYSCONFIGSIdleModeE)\
+(((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeGet32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >> \
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET))
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeIsb0032(var)\
+((MLBMAILBOX_SYSCONFIGSIdleModeb00 == \
+(MLBMAILBOX_SYSCONFIGSIdleModeE)\
+((((unsigned long)(var)) & MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeIsb0132(var)\
+((MLBMAILBOX_SYSCONFIGSIdleModeb01 == (MLBMAILBOX_SYSCONFIGSIdleModeE)\
+((((unsigned long)(var)) & MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeIsb1032(var)\
+((MLBMAILBOX_SYSCONFIGSIdleModeb10 == (MLBMAILBOX_SYSCONFIGSIdleModeE)\
+((((unsigned long)(var)) & MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeIsb1132(var)\
+((MLBMAILBOX_SYSCONFIGSIdleModeb11 == (MLBMAILBOX_SYSCONFIGSIdleModeE)\
+((((unsigned long)(var)) & MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeWrite32(base_address, value)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK);\
+ newValue <<= MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET;\
+ newValue &= MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+ \
+ offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeWriteb0032(base_address)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ const unsigned long newValue = \
+ (unsigned long)MLBMAILBOX_SYSCONFIGSIdleModeb00 <<\
+ MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE((unsigned long)(base_address)+offset);\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, data);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeWriteb0132(base_address)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ const unsigned long newValue = \
+ (unsigned long)MLBMAILBOX_SYSCONFIGSIdleModeb01 <<\
+ MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE((unsigned long)(base_address)+offset);\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, data);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeWriteb1032(base_address)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ const unsigned long newValue = \
+ (unsigned long)MLBMAILBOX_SYSCONFIGSIdleModeb10 <<\
+ MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE((unsigned long)(base_address)+offset);\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, data);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeWriteb1132(base_address)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ const unsigned long newValue = \
+ (unsigned long)MLBMAILBOX_SYSCONFIGSIdleModeb11 <<\
+ MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE((unsigned long)(base_address)+offset);\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, data);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeSet32(var, value)\
+(((((unsigned long)(var)) & ~(MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK)) |\
+((((unsigned long)(value)) << MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET) &\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK)))
+
+#define MLBMAILBOX_SYSCONFIGSoftResetRead32(base_address)\
+((((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_SoftReset_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET))
+
+#define MLBMAILBOX_SYSCONFIGSoftResetReadIsb032(base_address)\
+((MLBMAILBOX_SYSCONFIGSoftResetb0 == (MLBMAILBOX_SYSCONFIGSoftResetE)\
+(((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_SoftReset_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGSoftResetReadIsb132(base_address)\
+((MLBMAILBOX_SYSCONFIGSoftResetb1 == (MLBMAILBOX_SYSCONFIGSoftResetE)\
+(((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_SoftReset_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGSoftResetGet32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_SYSCONFIG_SoftReset_MASK) >> \
+MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET))
+
+#define MLBMAILBOX_SYSCONFIGSoftResetIsb032(var)\
+((MLBMAILBOX_SYSCONFIGSoftResetb0 == (MLBMAILBOX_SYSCONFIGSoftResetE)\
+((((unsigned long)(var)) & MLB_MAILBOX_SYSCONFIG_SoftReset_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGSoftResetIsb132(var)\
+((MLBMAILBOX_SYSCONFIGSoftResetb1 == (MLBMAILBOX_SYSCONFIGSoftResetE)\
+((((unsigned long)(var)) & MLB_MAILBOX_SYSCONFIG_SoftReset_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGSoftResetWrite32(base_address, value)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_SoftReset_MASK);\
+ newValue <<= MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET;\
+ newValue &= MLB_MAILBOX_SYSCONFIG_SoftReset_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGSoftResetWriteb032(base_address)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ const unsigned long newValue = \
+ (unsigned long)MLBMAILBOX_SYSCONFIGSoftResetb0 <<\
+ MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE((unsigned long)(base_address)+offset);\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_SoftReset_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, data);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGSoftResetWriteb132(base_address)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ const unsigned long newValue = \
+ (unsigned long)MLBMAILBOX_SYSCONFIGSoftResetb1 <<\
+ MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE((unsigned long)(base_address)+offset);\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_SoftReset_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, data);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGSoftResetSet32(var, value)\
+(((((unsigned long)(var)) & ~(MLB_MAILBOX_SYSCONFIG_SoftReset_MASK)) |\
+((((unsigned long)(value)) << MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET) &\
+MLB_MAILBOX_SYSCONFIG_SoftReset_MASK)))
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleRead32(base_address)\
+((((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET))
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleReadIsb032(base_address)\
+((MLBMAILBOX_SYSCONFIGAutoIdleb0 == (MLBMAILBOX_SYSCONFIGAutoIdleE)\
+(((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleReadIsb132(base_address)\
+((MLBMAILBOX_SYSCONFIGAutoIdleb1 == (MLBMAILBOX_SYSCONFIGAutoIdleE)\
+(((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleGet32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK) >> MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET))
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleIsb032(var)\
+((MLBMAILBOX_SYSCONFIGAutoIdleb0 == (MLBMAILBOX_SYSCONFIGAutoIdleE)\
+((((unsigned long)(var)) & MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleIsb132(var)\
+((MLBMAILBOX_SYSCONFIGAutoIdleb1 == (MLBMAILBOX_SYSCONFIGAutoIdleE)\
+((((unsigned long)(var)) & MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET)))
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleWrite32(base_address, value)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK);\
+ newValue <<= MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET;\
+ newValue &= MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleWriteb032(base_address)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ const unsigned long newValue = \
+ (unsigned long)MLBMAILBOX_SYSCONFIGAutoIdleb0 <<\
+ MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE((unsigned long)(base_address)+offset);\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, data);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleWriteb132(base_address)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ const unsigned long newValue = \
+ (unsigned long)MLBMAILBOX_SYSCONFIGAutoIdleb1 <<\
+ MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET;\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE((unsigned long)(base_address)+offset);\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, data);\
+} while (0)
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleSet32(var, value)\
+(((((unsigned long)(var)) & ~(MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK)) |\
+((((unsigned long)(value)) << MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET) &\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK)))
+
+#define MLBMAILBOX_SYSSTATUSReadRegister32(base_address)\
+(RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+MLB_MAILBOX_SYSSTATUS_OFFSET))
+
+#define MLBMAILBOX_SYSSTATUSResetDoneRead32(base_address)\
+((((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSSTATUS_OFFSET)))) &\
+MLB_MAILBOX_SYSSTATUS_ResetDone_MASK) >>\
+MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET))
+
+#define MLBMAILBOX_SYSSTATUSResetDoneReadisrstongoing32(base_address)\
+((MLBMAILBOX_SYSSTATUSResetDonerstongoing == (MLBMAILBOX_SYSSTATUSResetDoneE)\
+(((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSSTATUS_OFFSET)))) &\
+MLB_MAILBOX_SYSSTATUS_ResetDone_MASK) >>\
+MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET)))
+
+#define MLBMAILBOX_SYSSTATUSResetDoneReadisrstcomp32(base_address)\
+((MLBMAILBOX_SYSSTATUSResetDonerstcomp == (MLBMAILBOX_SYSSTATUSResetDoneE)\
+(((RD_MEM_32_VOLATILE((((unsigned long)(base_address))+\
+(MLB_MAILBOX_SYSSTATUS_OFFSET)))) &\
+MLB_MAILBOX_SYSSTATUS_ResetDone_MASK) >>\
+MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET)))
+
+#define MLBMAILBOX_SYSSTATUSResetDoneGet32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_SYSSTATUS_ResetDone_MASK) >> \
+MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET))
+
+#define MLBMAILBOX_SYSSTATUSResetDoneisrstongoing32(var)\
+((MLBMAILBOX_SYSSTATUSResetDonerstongoing == (MLBMAILBOX_SYSSTATUSResetDoneE)\
+((((unsigned long)(var)) & MLB_MAILBOX_SYSSTATUS_ResetDone_MASK) >>\
+MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET)))
+
+#define MLBMAILBOX_SYSSTATUSResetDoneisrstcomp32(var)\
+((MLBMAILBOX_SYSSTATUSResetDonerstcomp == (MLBMAILBOX_SYSSTATUSResetDoneE)\
+((((unsigned long)(var)) & MLB_MAILBOX_SYSSTATUS_ResetDone_MASK) >>\
+MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET)))
+
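Taken together, the SYSCONFIG SoftReset and SYSSTATUS ResetDone accessors above implement the usual reset handshake: set SoftReset, then poll ResetDone until the module reports completion. A minimal sketch of that sequence, assuming a build with USE_LEVEL_1_MACROS defined and an already-ioremapped mailbox base address; the function name and the retry bound are illustrative, not part of the patch:

/* Illustrative only: soft-reset the mailbox block and wait for
 * MAILBOX_SYSSTATUS.ResetDone to signal "reset complete". */
static int mbox_soft_reset(unsigned long mbox_base)
{
	int retries = 1000;	/* arbitrary bound for this sketch */

	MLBMAILBOX_SYSCONFIGSoftResetWriteb132(mbox_base);

	while (!MLBMAILBOX_SYSSTATUSResetDoneReadisrstcomp32(mbox_base)) {
		if (--retries == 0)
			return -1;	/* reset never completed */
	}

	return 0;
}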
+#define MLBMAILBOX_MESSAGE___0_15ReadRegister32(base_address, bank)\
+(RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +\
+MLB_MAILBOX_MESSAGE___0_15_OFFSET+((bank)*\
+MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP))))
+
+#define MLBMAILBOX_MESSAGE___0_15WriteRegister32(base_address, bank, value)\
+do {\
+ const unsigned long offset = MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +\
+ MLB_MAILBOX_MESSAGE___0_15_OFFSET +\
+ ((bank) * MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ WR_MEM_32_VOLATILE(((unsigned long)(base_address))+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_MESSAGE___0_15MessageValueMBmRead32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +\
+MLB_MAILBOX_MESSAGE___0_15_OFFSET+((bank)*\
+MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP)))) &\
+MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_MASK) >>\
+MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_OFFSET))
+
+#define MLBMAILBOX_MESSAGE___0_15MessageValueMBmGet32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_MASK) >> \
+MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_OFFSET))
+
+#define MLBMAILBOX_MESSAGE___0_15MessageValueMBmWrite32\
+(base_address, bank, value) do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +\
+ MLB_MAILBOX_MESSAGE___0_15_OFFSET +\
+ ((bank) * MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_MASK);\
+ newValue <<= MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_OFFSET;\
+ newValue &= MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_MESSAGE___0_15MessageValueMBmSet32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_OFFSET) &\
+MLB_MAILBOX_MESSAGE___0_15_MessageValueMBm_MASK)))
+
+#define MLBMAILBOX_FIFOSTATUS___0_15ReadRegister32(base_address, bank)\
+(RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET +\
+MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET+((bank)*\
+MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP))))
+
+#define MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmRead32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET +\
+MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET+((bank)*\
+MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP)))) &\
+MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_MASK) >>\
+MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_OFFSET))
+
+#define MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmGet32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_MASK) >> \
+MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_OFFSET))
+
+#define MLBMAILBOX_MSGSTATUS___0_15ReadRegister32(base_address, bank)\
+(RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_MSGSTATUS___REGSET_0_15_OFFSET +\
+MLB_MAILBOX_MSGSTATUS___0_15_OFFSET+((bank)*\
+MLB_MAILBOX_MSGSTATUS___REGSET_0_15_STEP))))
+
+#define MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_MSGSTATUS___REGSET_0_15_OFFSET +\
+MLB_MAILBOX_MSGSTATUS___0_15_OFFSET+((bank)*\
+MLB_MAILBOX_MSGSTATUS___REGSET_0_15_STEP)))) &\
+MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_MASK) >>\
+MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_OFFSET))
+
+#define MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmGet32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_MASK) \
+>> MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_OFFSET))
+
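The MESSAGE, FIFOSTATUS and MSGSTATUS accessors above map directly onto the per-mailbox FIFO: MSGSTATUS reports how many messages are queued, FIFOSTATUS whether the FIFO is full, and MESSAGE is the data register itself. A minimal sketch of a non-blocking receive built on them, again assuming USE_LEVEL_1_MACROS and an ioremapped base; the function and parameter names are illustrative:

/* Illustrative only: read one pending message from mailbox 'mbox_id'
 * (0..15); returns 1 when *msg was filled, 0 when the FIFO was empty. */
static int mbox_try_read(unsigned long mbox_base, int mbox_id,
			 unsigned long *msg)
{
	if (!MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32(mbox_base, mbox_id))
		return 0;

	*msg = MLBMAILBOX_MESSAGE___0_15ReadRegister32(mbox_base, mbox_id);
	return 1;
}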
+#if defined(OMAP44XX) || defined(VPOM4430_1_06)
+
+#define MLBMAILBOX_IRQSTATUS_CLR___0_3ReadRegister32(base_address, bank)\
+(RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS_CLR___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS_CLR___REGSET_0_3_STEP))))
+
+#else
+
+#define MLBMAILBOX_IRQSTATUS___0_3ReadRegister32(base_address, bank)\
+(RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP))))
+
+#endif
+
+#if defined(OMAP44XX) || defined(VPOM4430_1_06)
+
+#define MLBMAILBOX_IRQSTATUS_CLR___0_3WriteRegister32\
+(base_address, bank, value) do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS_CLR___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * \
+ MLB_MAILBOX_IRQSTATUS_CLR___REGSET_0_3_STEP);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ WR_MEM_32_VOLATILE(((unsigned long)(base_address))+ \
+ offset, newValue);\
+} while (0)
+
+#else
+
+#define MLBMAILBOX_IRQSTATUS___0_3WriteRegister32(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ WR_MEM_32_VOLATILE(((unsigned long)(base_address))+offset, newValue);\
+} while (0)
+
+#endif
+
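The conditional above selects the register used to acknowledge interrupts: on OMAP4 (OMAP44XX / VPOM4430_1_06) the dedicated write-to-clear MAILBOX_IRQSTATUS_CLR register is used, while older parts write the MAILBOX_IRQSTATUS register itself. A minimal sketch of acknowledging a new-message interrupt on an OMAP4 build, relying on the bit layout from the mask tables above (the NewMsg bit of mailbox m sits at bit 2*m); function and parameter names are illustrative:

/* Illustrative only: acknowledge the "new message" interrupt of
 * mailbox 'mbox_id' for interrupt user/bank 'user' (0..3). */
static void mbox_ack_new_msg(unsigned long mbox_base, int user, int mbox_id)
{
	unsigned long bit = 1UL << (2 * mbox_id);	/* NewMsgStatusUuMBm */

	if (MLBMAILBOX_IRQSTATUS_CLR___0_3ReadRegister32(mbox_base, user) & bit)
		MLBMAILBOX_IRQSTATUS_CLR___0_3WriteRegister32(mbox_base,
							      user, bit);
}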
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB15Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB15Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB15Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB15Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB15_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB15Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB15Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB15Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB15Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB15_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB14Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+\
+((bank)*MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB14Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB14Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB14Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB14_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB14Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB14Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB14Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB14Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB14_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB13Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB13Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB13Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB13Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB13_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB13Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB13Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB13Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB13Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB13_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB12Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB12Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_MASK) >> \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB12Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB12Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB12_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB12Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB12Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB12Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB12Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB12_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB11Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB11Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB11Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB11Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB11_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB11Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB11Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB11Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB11Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB11_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB10Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB10Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB10Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB10Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB10_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB10Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB10Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB10Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB10Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB10_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB9Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB9Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB9Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB9Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB9_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB9Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB9Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB9Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB9Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB9_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB8Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB8Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB8Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB8Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB8_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB8Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB8Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB8Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB8Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB8_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB7Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB7Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB7Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB7Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB7_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB7Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB7Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB7Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB7Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB7_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB6Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB6Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_MASK) >> \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB6Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB6Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB6_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB6Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB6Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB6Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB6Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB6_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB5Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+\
+((bank)*MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB5Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB5Write32\
+(base_address, bank, value)\
+do {\
+	const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB5Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB5_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB5Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB5Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB5Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB5Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB5_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB4Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB4Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_MASK) >> \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB4Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB4Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB4_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB4Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB4Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_MASK) >> \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB4Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB4Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB4_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB3Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))\
++(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)\
+*MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB3Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_MASK) >> \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB3Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB3Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB3_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB3Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB3Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_MASK) >> \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB3Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB3Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB3_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB2Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB2Get32(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB2Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB2Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB2_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB2Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB2Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_MASK) \
+>> MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB2Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB2Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB2_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB1Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+\
+((bank)*MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB1Get32(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_MASK) >> \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB1Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+	register unsigned long data = \
+	RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB1Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB1_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB1Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB1Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_MASK) >> \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB1Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB1Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB1_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB0Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB0Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_MASK) >> \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB0Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NotFullStatusUuMB0Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NotFullStatusUuMB0_MASK)))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB0Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_MASK) >>\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB0Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_MASK) >> \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_OFFSET))
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB0Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_MASK);\
+ newValue <<= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQSTATUS___0_3NewMsgStatusUuMB0Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_OFFSET) &\
+MLB_MAILBOX_IRQSTATUS___0_3_NewMsgStatusUuMB0_MASK)))
+
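+/*
+ * The register-level IRQENABLE accessors below select between the
+ * IRQENABLE_SET / IRQENABLE_CLR register pair (OMAP44XX and VPOM4430_1_06
+ * builds) and the single legacy IRQENABLE register.  The ReadRegister32
+ * variants return the whole register; the WriteRegister32 variants store
+ * the caller-supplied value unmodified.
+ */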
+#if defined(OMAP44XX) || defined(VPOM4430_1_06)
+
+#define MLBMAILBOX_IRQENABLE_SET___0_3ReadRegister32(base_address, bank)\
+(RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE_SET___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE_SET___REGSET_0_3_STEP))))
+
+#else
+
+#define MLBMAILBOX_IRQENABLE___0_3ReadRegister32(base_address, bank)\
+(RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP))))
+
+#endif
+
+#if defined(OMAP44XX) || defined(VPOM4430_1_06)
+
+#define MLBMAILBOX_IRQENABLE_SET___0_3WriteRegister32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE_SET___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * \
+ MLB_MAILBOX_IRQENABLE_SET___REGSET_0_3_STEP);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ WR_MEM_32_VOLATILE(((unsigned long)(base_address))+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE_CLR___0_3WriteRegister32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE_CLR___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * \
+ MLB_MAILBOX_IRQENABLE_CLR___REGSET_0_3_STEP);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ WR_MEM_32_VOLATILE(((unsigned long)(base_address))+offset, newValue);\
+} while (0)
+
+#else
+
+#define MLBMAILBOX_IRQENABLE___0_3WriteRegister32(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ WR_MEM_32_VOLATILE(((unsigned long)(base_address))+offset, newValue);\
+} while (0)
+
+#endif
+
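+/*
+ * The per-bit enable accessors that follow always operate on the legacy
+ * MLB_MAILBOX_IRQENABLE___0_3 register layout (read-modify-write of the
+ * enable register), independent of the OMAP44XX conditional above.
+ */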
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB15Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB15Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB15Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB15Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB15_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB15Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB15Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB15Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB15Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB15_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB14Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB14Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB14Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB14Set32\
+(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB14_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB14Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB14Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB14Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB14Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB14_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB13Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB13Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB13Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB13Set32\
+(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB13_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB13Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB13Get32(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB13Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB13Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB13_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB12Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB12Get32(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB12Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB12Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB12_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB12Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB12Get32(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB12Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB12Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB12_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB11Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB11Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB11Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB11Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB11_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB11Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB11Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB11Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB11Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB11_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB10Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB10Get32(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB10Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+	register unsigned long newValue = ((unsigned long)(value));\
+	data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_MASK);\
+	newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_OFFSET;\
+	newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB10Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB10_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB10Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+\
+((bank)*MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB10Get32(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB10Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+	register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB10Set32\
+(var, value)\
+(((((unsigned long)(var)) &\
+~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB10_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB9Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB9Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB9Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB9Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB9_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB9Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB9Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB9Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB9Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB9_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB8Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))\
++(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB8Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB8Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB8Set32(var, value)\
+(((((unsigned long)(var)) & \
+~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB8_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB8Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB8Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB8Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB8Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB8_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB7Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB7Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB7Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB7Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB7_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB7Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB7Get32(var)\
+((unsigned long)((((unsigned long)(var)) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB7Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB7Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB7_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB6Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB6Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB6Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB6Set32\
+(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB6_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB6Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB6Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB6Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB6Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB6_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB5Read32(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB5Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB5Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB5Set32\
+(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB5_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB5Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))\
++(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB5Get32(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB5Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB5Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB5_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB4Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))\
++(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB4Get32\
+(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB4Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB4Set32\
+(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB4_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB4Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB4Get32(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB4Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB4Set32\
+(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB4_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB3Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))\
++(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)\
+*MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB3Get32\
+(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB3Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB3Set32\
+(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB3_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB3Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB3Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB3Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB3Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB3_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB2Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB2Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB2Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB2Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB2_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB2Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB2Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB2Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB2Set32\
+(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB2_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB1Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))\
++(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)\
+*MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB1Get32(var)\
+((unsigned long)((((unsigned long)(var)) \
+& MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB1Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB1Set32\
+(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_MASK)) |\
+((((unsigned long)(value)) << \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB1_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB1Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB1Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_MASK) >> \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB1Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB1Set32(var, value)\
+(((((unsigned long)(var)) &\
+~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB1_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB0Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB0Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB0Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NotFullEnableUuMB0Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NotFullEnableUuMB0_MASK)))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB0Read32\
+(base_address, bank)\
+((((RD_MEM_32_VOLATILE(((unsigned long)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+((bank)*\
+MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_MASK) >>\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB0Get32(var)\
+((unsigned long)((((unsigned long)(var)) & \
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_MASK) \
+>> MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_OFFSET))
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB0Write32\
+(base_address, bank, value)\
+do {\
+ const unsigned long offset = \
+ MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank) * MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register unsigned long data = \
+ RD_MEM_32_VOLATILE(((unsigned long)(base_address))+offset);\
+ register unsigned long newValue = ((unsigned long)(value));\
+ data &= ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_MASK);\
+ newValue <<= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_OFFSET;\
+ newValue &= MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((unsigned long)(base_address)+offset, newValue);\
+} while (0)
+
+#define MLBMAILBOX_IRQENABLE___0_3NewMsgEnableUuMB0Set32(var, value)\
+(((((unsigned long)(var)) \
+& ~(MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_MASK)) |\
+((((unsigned long)(value)) \
+<< MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_OFFSET) &\
+MLB_MAILBOX_IRQENABLE___0_3_NewMsgEnableUuMB0_MASK)))
+
+#endif /* USE_LEVEL_1_MACROS */
+
+
+#endif /* _MBX_REG_ACM_H */
diff --git a/arch/arm/plat-omap/include/syslink/MLBAccInt.h b/arch/arm/plat-omap/include/syslink/MLBAccInt.h
new file mode 100644
index 000000000000..6cd469709005
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/MLBAccInt.h
@@ -0,0 +1,132 @@
+/*
+ * MLBAccInt.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#ifndef _MLB_ACC_INT_H
+#define _MLB_ACC_INT_H
+
+/* Mappings of level 1 EASI function numbers to function names */
+
+#define EASIL1_MLBMAILBOX_SYSCONFIGReadRegister32 (MLB_BASE_EASIL1 + 3)
+#define EASIL1_MLBMAILBOX_SYSCONFIGWriteRegister32 (MLB_BASE_EASIL1 + 4)
+#define EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeRead32 (MLB_BASE_EASIL1 + 7)
+#define EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeWrite32 (MLB_BASE_EASIL1 + 17)
+#define EASIL1_MLBMAILBOX_SYSCONFIGSoftResetWrite32 (MLB_BASE_EASIL1 + 29)
+#define EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleRead32 (MLB_BASE_EASIL1 + 33)
+#define EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleWrite32 (MLB_BASE_EASIL1 + 39)
+#define EASIL1_MLBMAILBOX_SYSSTATUSResetDoneRead32 (MLB_BASE_EASIL1 + 44)
+#define EASIL1_MLBMAILBOX_MESSAGE___0_15ReadRegister32 \
+ (MLB_BASE_EASIL1 + 50)
+#define EASIL1_MLBMAILBOX_MESSAGE___0_15WriteRegister32 \
+ (MLB_BASE_EASIL1 + 51)
+#define EASIL1_MLBMAILBOX_FIFOSTATUS___0_15ReadRegister32 \
+ (MLB_BASE_EASIL1 + 56)
+#define EASIL1_MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmRead32 \
+ (MLB_BASE_EASIL1 + 57)
+#define EASIL1_MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32 \
+ (MLB_BASE_EASIL1 + 60)
+#define EASIL1_MLBMAILBOX_IRQSTATUS___0_3ReadRegister32 \
+ (MLB_BASE_EASIL1 + 62)
+#define EASIL1_MLBMAILBOX_IRQSTATUS___0_3WriteRegister32 \
+ (MLB_BASE_EASIL1 + 63)
+#define EASIL1_MLBMAILBOX_IRQENABLE___0_3ReadRegister32 \
+ (MLB_BASE_EASIL1 + 192)
+#define EASIL1_MLBMAILBOX_IRQENABLE___0_3WriteRegister32 \
+ (MLB_BASE_EASIL1 + 193)
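+
+/*
+ * These function numbers are passed to the _DEBUG_LEVEL_1_EASI() trace
+ * hook by the corresponding accessor macros in MLBRegAcM.h when
+ * USE_LEVEL_1_MACROS is defined.
+ */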
+
+/* Register set MAILBOX_MESSAGE___REGSET_0_15 address offset, bank address
+ * increment and number of banks */
+
+#define MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET (u32)(0x0040)
+#define MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP (u32)(0x0004)
+
+/* Register offset address definitions relative to register set
+ * MAILBOX_MESSAGE___REGSET_0_15 */
+
+#define MLB_MAILBOX_MESSAGE___0_15_OFFSET (u32)(0x0)
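+
+/*
+ * For reference, the accessor macros in MLBRegAcM.h compute the address of
+ * message register <bank> as
+ *   base_address + MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +
+ *   MLB_MAILBOX_MESSAGE___0_15_OFFSET +
+ *   ((bank) * MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP)
+ * The same OFFSET/STEP pattern applies to the other register sets below.
+ */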
+
+
+/* Register set MAILBOX_FIFOSTATUS___REGSET_0_15 address offset, bank address
+ * increment and number of banks */
+
+#define MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET (u32)(0x0080)
+#define MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP (u32)(0x0004)
+
+/* Register offset address definitions relative to register set
+ * MAILBOX_FIFOSTATUS___REGSET_0_15 */
+
+#define MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET (u32)(0x0)
+
+
+/* Register set MAILBOX_MSGSTATUS___REGSET_0_15 address offset, bank address
+ * increment and number of banks */
+
+#define MLB_MAILBOX_MSGSTATUS___REGSET_0_15_OFFSET (u32)(0x00c0)
+#define MLB_MAILBOX_MSGSTATUS___REGSET_0_15_STEP (u32)(0x0004)
+
+/* Register offset address definitions relative to register set
+ * MAILBOX_MSGSTATUS___REGSET_0_15 */
+
+#define MLB_MAILBOX_MSGSTATUS___0_15_OFFSET (u32)(0x0)
+
+
+/* Register set MAILBOX_IRQSTATUS___REGSET_0_3 address offset, bank address
+ * increment and number of banks */
+
+#define MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET (u32)(0x0100)
+#define MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP (u32)(0x0008)
+
+/* Register offset address definitions relative to register set
+ * MAILBOX_IRQSTATUS___REGSET_0_3 */
+
+#define MLB_MAILBOX_IRQSTATUS___0_3_OFFSET (u32)(0x0)
+
+
+/* Register set MAILBOX_IRQENABLE___REGSET_0_3 address offset, bank address
+ * increment and number of banks */
+
+#define MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET (u32)(0x0104)
+#define MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP (u32)(0x0008)
+
+/* Register offset address definitions relative to register set
+ * MAILBOX_IRQENABLE___REGSET_0_3 */
+
+#define MLB_MAILBOX_IRQENABLE___0_3_OFFSET (u32)(0x0)
+
+
+/* Register offset address definitions */
+
+#define MLB_MAILBOX_SYSCONFIG_OFFSET (u32)(0x10)
+#define MLB_MAILBOX_SYSSTATUS_OFFSET (u32)(0x14)
+
+
+/* Bitfield mask and offset declarations */
+
+#define MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK (u32)(0x18)
+#define MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET (u32)(3)
+#define MLB_MAILBOX_SYSCONFIG_SoftReset_MASK (u32)(0x2)
+#define MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET (u32)(1)
+#define MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK (u32)(0x1)
+#define MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET (u32)(0)
+#define MLB_MAILBOX_SYSSTATUS_ResetDone_MASK (u32)(0x1)
+#define MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET (u32)(0)
+#define MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_MASK (u32)(0x1)
+#define MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_OFFSET (u32)(0)
+#define MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_MASK (u32)(0x7f)
+#define MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_OFFSET (u32)(0)
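+
+/*
+ * Illustrative use of a mask/offset pair (sketch only; "sysconfig" is a
+ * hypothetical raw register value):
+ *   sidle_mode = (sysconfig & MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>
+ *                MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET;
+ * The Read32/Get32 macros in MLBRegAcM.h expand to this same pattern.
+ */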
+
+#endif /* _MLB_ACC_INT_H */
diff --git a/arch/arm/plat-omap/include/syslink/MLBRegAcM.h b/arch/arm/plat-omap/include/syslink/MLBRegAcM.h
new file mode 100644
index 000000000000..5ef9cf32aef2
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/MLBRegAcM.h
@@ -0,0 +1,206 @@
+/*
+ * MLBRegAcM.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MLB_REG_ACM_H
+#define _MLB_REG_ACM_H
+
+#include <syslink/GlobalTypes.h>
+#include <syslink/EasiGlobal.h>
+#include <syslink/MLBAccInt.h>
+
+#if defined(USE_LEVEL_1_MACROS)
+
+#define MLBMAILBOX_SYSCONFIGReadRegister32(base_address)\
+(_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGReadRegister32),\
+RD_MEM_32_VOLATILE(((u32)(base_address))+ \
+MLB_MAILBOX_SYSCONFIG_OFFSET))
+
+
+#define MLBMAILBOX_SYSCONFIGWriteRegister32(base_address, value)\
+do {\
+ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ register u32 newValue = ((u32)(value));\
+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGWriteRegister32);\
+ WR_MEM_32_VOLATILE(((u32)(base_address))+offset, newValue);\
+} while (0)
+
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeRead32(base_address)\
+(_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeRead32),\
+(((RD_MEM_32_VOLATILE((((u32)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET))
+
+
+#define MLBMAILBOX_SYSCONFIGSIdleModeWrite32(base_address, value)\
+do {\
+ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE(((u32)(base_address)) +\
+ offset);\
+ register u32 newValue = ((u32)(value));\
+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeWrite32);\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK);\
+ newValue <<= MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET;\
+ newValue &= MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((u32)(base_address)+offset, newValue);\
+} while (0)
+
+
+#define MLBMAILBOX_SYSCONFIGSoftResetWrite32(base_address, value)\
+do {\
+ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ register u32 data =\
+ RD_MEM_32_VOLATILE(((u32)(base_address))+offset);\
+ register u32 newValue = ((u32)(value));\
+ printk(KERN_ALERT "In SYSCONFIG MACRO line %i file %s\n", \
+ __LINE__, __FILE__);\
+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGSoftResetWrite32);\
+ printk(KERN_ALERT "******************BEFORE DATA WRITE\n");\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_SoftReset_MASK);\
+ printk(KERN_ALERT "line %i file %s\n", __LINE__, __FILE__);\
+ newValue <<= MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET;\
+ newValue &= MLB_MAILBOX_SYSCONFIG_SoftReset_MASK;\
+ newValue |= data;\
+ printk(KERN_ALERT "line %i file %s\n", __LINE__, __FILE__);\
+ WR_MEM_32_VOLATILE((u32)(base_address)+offset, newValue);\
+ printk(KERN_ALERT "line %i file %s\n", __LINE__, __FILE__);\
+} while (0)
+
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleRead32(base_address)\
+(_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleRead32),\
+(((RD_MEM_32_VOLATILE((((u32)(base_address))+\
+(MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK) >>\
+MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET))
+
+
+#define MLBMAILBOX_SYSCONFIGAutoIdleWrite32(base_address, value)\
+do {\
+ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\
+ register u32 data =\
+ RD_MEM_32_VOLATILE(((u32)(base_address))+offset);\
+ register u32 newValue = ((u32)(value));\
+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleWrite32);\
+ data &= ~(MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK);\
+ newValue <<= MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET;\
+ newValue &= MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE((u32)(base_address)+offset, newValue);\
+} while (0)
+
+
+#define MLBMAILBOX_SYSSTATUSResetDoneRead32(base_address)\
+(_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSSTATUSResetDoneRead32),\
+(((RD_MEM_32_VOLATILE((((u32)(base_address))+\
+(MLB_MAILBOX_SYSSTATUS_OFFSET)))) &\
+MLB_MAILBOX_SYSSTATUS_ResetDone_MASK) >>\
+MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET))
+
+
+#define MLBMAILBOX_MESSAGE___0_15ReadRegister32(base_address, bank)\
+(_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_MESSAGE___0_15ReadRegister32),\
+RD_MEM_32_VOLATILE(((u32)(base_address))+\
+(MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +\
+MLB_MAILBOX_MESSAGE___0_15_OFFSET+(\
+(bank)*MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP))))
+
+
+#define MLBMAILBOX_MESSAGE___0_15WriteRegister32(base_address, bank, value)\
+do {\
+ const u32 offset = MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +\
+ MLB_MAILBOX_MESSAGE___0_15_OFFSET +\
+ ((bank)*MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP);\
+ register u32 newValue = ((u32)(value));\
+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_MESSAGE___0_15WriteRegister32);\
+ WR_MEM_32_VOLATILE(((u32)(base_address))+offset, newValue);\
+} while (0)
+
+
+#define MLBMAILBOX_FIFOSTATUS___0_15ReadRegister32(base_address, bank)\
+(_DEBUG_LEVEL_1_EASI(\
+EASIL1_MLBMAILBOX_FIFOSTATUS___0_15ReadRegister32),\
+RD_MEM_32_VOLATILE(((u32)(base_address))+\
+(MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET +\
+MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET+\
+((bank)*MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP))))
+
+
+#define MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmRead32(base_address, bank)\
+(_DEBUG_LEVEL_1_EASI(\
+EASIL1_MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmRead32),\
+(((RD_MEM_32_VOLATILE(((u32)(base_address))+\
+(MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET +\
+MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET+\
+((bank)*MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP)))) &\
+MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_MASK) >>\
+MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_OFFSET))
+
+
+#define MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32(base_address, bank)\
+(_DEBUG_LEVEL_1_EASI(\
+EASIL1_MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32),\
+(((RD_MEM_32_VOLATILE(((u32)(base_address))+\
+(MLB_MAILBOX_MSGSTATUS___REGSET_0_15_OFFSET +\
+MLB_MAILBOX_MSGSTATUS___0_15_OFFSET+\
+((bank)*MLB_MAILBOX_MSGSTATUS___REGSET_0_15_STEP)))) &\
+MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_MASK) >>\
+MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_OFFSET))
+
+
+#define MLBMAILBOX_IRQSTATUS___0_3ReadRegister32(base_address, bank)\
+(_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQSTATUS___0_3ReadRegister32),\
+RD_MEM_32_VOLATILE(((u32)(base_address))+\
+(MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+\
+((bank)*MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP))))
+
+
+#define MLBMAILBOX_IRQSTATUS___0_3WriteRegister32(base_address, bank, value)\
+do {\
+ const u32 offset = MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\
+ ((bank)*MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\
+ register u32 newValue = ((u32)(value));\
+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQSTATUS___0_3WriteRegister32);\
+ WR_MEM_32_VOLATILE(((u32)(base_address))+offset, newValue);\
+} while (0)
+
+
+#define MLBMAILBOX_IRQENABLE___0_3ReadRegister32(base_address, bank)\
+(_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQENABLE___0_3ReadRegister32),\
+RD_MEM_32_VOLATILE(((u32)(base_address))+\
+(MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+MLB_MAILBOX_IRQENABLE___0_3_OFFSET+\
+((bank)*MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP))))
+
+
+#define MLBMAILBOX_IRQENABLE___0_3WriteRegister32(base_address, bank, value)\
+do {\
+ const u32 offset = MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\
+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\
+ ((bank)*MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\
+ register u32 newValue = ((u32)(value));\
+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQENABLE___0_3WriteRegister32);\
+ WR_MEM_32_VOLATILE(((u32)(base_address))+offset, newValue);\
+} while (0)
+
+
+#endif /* USE_LEVEL_1_MACROS */
+
+#endif /* _MLB_REG_ACM_H */
diff --git a/arch/arm/plat-omap/include/syslink/MMUAccInt.h b/arch/arm/plat-omap/include/syslink/MMUAccInt.h
new file mode 100644
index 000000000000..2aa0fa2436ae
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/MMUAccInt.h
@@ -0,0 +1,180 @@
+/*
+ * MMUAccInt.h
+ *
+ * Syslink ducati driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MMU_ACC_INT_H
+#define _MMU_ACC_INT_H
+
+
+/* Register offset address definitions */
+
+#define MMU_MMU_REVISION_OFFSET 0x0
+#define MMU_MMU_SYSCONFIG_OFFSET 0x10
+#define MMU_MMU_SYSSTATUS_OFFSET 0x14
+#define MMU_MMU_IRQSTATUS_OFFSET 0x18
+#define MMU_MMU_IRQENABLE_OFFSET 0x1c
+#define MMU_MMU_WALKING_ST_OFFSET 0x40
+#define MMU_MMU_CNTL_OFFSET 0x44
+#define MMU_MMU_FAULT_AD_OFFSET 0x48
+#define MMU_MMU_TTB_OFFSET 0x4c
+#define MMU_MMU_LOCK_OFFSET 0x50
+#define MMU_MMU_LD_TLB_OFFSET 0x54
+#define MMU_MMU_CAM_OFFSET 0x58
+#define MMU_MMU_RAM_OFFSET 0x5c
+#define MMU_MMU_GFLUSH_OFFSET 0x60
+#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64
+#define MMU_MMU_READ_CAM_OFFSET 0x68
+#define MMU_MMU_READ_RAM_OFFSET 0x6c
+#define MMU_MMU_EMU_FAULT_AD_OFFSET 0x70
+#define MMU_MMU_FAULT_PC_OFFSET 0x80
+#define MMU_MMU_FAULT_STATUS_OFFSET 0x84
+
+/* Bitfield mask and offset declarations */
+
+#define MMU_MMU_REVISION_Rev_MASK 0xff
+#define MMU_MMU_REVISION_Rev_OFFSET 0
+
+#define MMU_MMU_SYSCONFIG_ClockActivity_MASK 0x300
+#define MMU_MMU_SYSCONFIG_ClockActivity_OFFSET 8
+
+#define MMU_MMU_SYSCONFIG_IdleMode_MASK 0x18
+#define MMU_MMU_SYSCONFIG_IdleMode_OFFSET 3
+
+#define MMU_MMU_SYSCONFIG_SoftReset_MASK 0x2
+#define MMU_MMU_SYSCONFIG_SoftReset_OFFSET 1
+
+#define MMU_MMU_SYSCONFIG_AutoIdle_MASK 0x1
+#define MMU_MMU_SYSCONFIG_AutoIdle_OFFSET 0
+
+#define MMU_MMU_SYSSTATUS_ResetDone_MASK 0x1
+#define MMU_MMU_SYSSTATUS_ResetDone_OFFSET 0
+
+#define MMU_MMU_IRQSTATUS_MultiHitFault_MASK 0x10
+#define MMU_MMU_IRQSTATUS_MultiHitFault_OFFSET 4
+
+#define MMU_MMU_IRQSTATUS_TableWalkFault_MASK 0x8
+#define MMU_MMU_IRQSTATUS_TableWalkFault_OFFSET 3
+
+#define MMU_MMU_IRQSTATUS_EMUMiss_MASK 0x4
+#define MMU_MMU_IRQSTATUS_EMUMiss_OFFSET 2
+
+#define MMU_MMU_IRQSTATUS_TranslationFault_MASK 0x2
+#define MMU_MMU_IRQSTATUS_TranslationFault_OFFSET 1
+
+#define MMU_MMU_IRQSTATUS_TLBMiss_MASK 0x1
+#define MMU_MMU_IRQSTATUS_TLBMiss_OFFSET 0
+
+#define MMU_MMU_IRQENABLE_MultiHitFault_MASK 0x10
+#define MMU_MMU_IRQENABLE_MultiHitFault_OFFSET 4
+
+#define MMU_MMU_IRQENABLE_TableWalkFault_MASK 0x8
+#define MMU_MMU_IRQENABLE_TableWalkFault_OFFSET 3
+
+#define MMU_MMU_IRQENABLE_EMUMiss_MASK 0x4
+#define MMU_MMU_IRQENABLE_EMUMiss_OFFSET 2
+
+#define MMU_MMU_IRQENABLE_TranslationFault_MASK 0x2
+#define MMU_MMU_IRQENABLE_TranslationFault_OFFSET 1
+
+#define MMU_MMU_IRQENABLE_TLBMiss_MASK 0x1
+#define MMU_MMU_IRQENABLE_TLBMiss_OFFSET 0
+
+#define MMU_MMU_WALKING_ST_TWLRunning_MASK 0x1
+#define MMU_MMU_WALKING_ST_TWLRunning_OFFSET 0
+
+#define MMU_MMU_CNTL_EmuTLBUpdate_MASK 0x8
+#define MMU_MMU_CNTL_EmuTLBUpdate_OFFSET 3
+
+#define MMU_MMU_CNTL_TWLEnable_MASK 0x4
+#define MMU_MMU_CNTL_TWLEnable_OFFSET 2
+
+#define MMU_MMU_CNTL_MMUEnable_MASK 0x2
+#define MMU_MMU_CNTL_MMUEnable_OFFSET 1
+
+#define MMU_MMU_FAULT_AD_FaultAddress_MASK 0xffffffff
+#define MMU_MMU_FAULT_AD_FaultAddress_OFFSET 0
+
+#define MMU_MMU_TTB_TTBAddress_MASK 0xffffff00
+#define MMU_MMU_TTB_TTBAddress_OFFSET 8
+
+#define MMU_MMU_LOCK_BaseValue_MASK 0xfc00
+#define MMU_MMU_LOCK_BaseValue_OFFSET 10
+
+#define MMU_MMU_LOCK_CurrentVictim_MASK 0x3f0
+#define MMU_MMU_LOCK_CurrentVictim_OFFSET 4
+
+#define MMU_MMU_LD_TLB_LdTLBItem_MASK 0x1
+#define MMU_MMU_LD_TLB_LdTLBItem_OFFSET 0
+
+#define MMU_MMU_CAM_VATag_MASK 0xfffff000
+#define MMU_MMU_CAM_VATag_OFFSET 12
+
+#define MMU_MMU_CAM_P_MASK 0x8
+#define MMU_MMU_CAM_P_OFFSET 3
+
+#define MMU_MMU_CAM_V_MASK 0x4
+#define MMU_MMU_CAM_V_OFFSET 2
+
+#define MMU_MMU_CAM_PageSize_MASK 0x3
+#define MMU_MMU_CAM_PageSize_OFFSET 0
+
+#define MMU_MMU_RAM_PhysicalAddress_MASK 0xfffff000
+#define MMU_MMU_RAM_PhysicalAddress_OFFSET 12
+
+#define MMU_MMU_RAM_Endianness_MASK 0x200
+#define MMU_MMU_RAM_Endianness_OFFSET 9
+
+#define MMU_MMU_RAM_ElementSize_MASK 0x180
+#define MMU_MMU_RAM_ElementSize_OFFSET 7
+
+#define MMU_MMU_RAM_Mixed_MASK 0x40
+#define MMU_MMU_RAM_Mixed_OFFSET 6
+
+#define MMU_MMU_GFLUSH_GlobalFlush_MASK 0x1
+#define MMU_MMU_GFLUSH_GlobalFlush_OFFSET 0
+
+#define MMU_MMU_FLUSH_ENTRY_FlushEntry_MASK 0x1
+#define MMU_MMU_FLUSH_ENTRY_FlushEntry_OFFSET 0
+
+#define MMU_MMU_READ_CAM_VATag_MASK 0xfffff000
+#define MMU_MMU_READ_CAM_VATag_OFFSET 12
+
+#define MMU_MMU_READ_CAM_P_MASK 0x8
+#define MMU_MMU_READ_CAM_P_OFFSET 3
+
+#define MMU_MMU_READ_CAM_V_MASK 0x4
+#define MMU_MMU_READ_CAM_V_OFFSET 2
+
+#define MMU_MMU_READ_CAM_PageSize_MASK 0x3
+#define MMU_MMU_READ_CAM_PageSize_OFFSET 0
+
+#define MMU_MMU_READ_RAM_PhysicalAddress_MASK 0xfffff000
+#define MMU_MMU_READ_RAM_PhysicalAddress_OFFSET 12
+
+#define MMU_MMU_READ_RAM_Endianness_MASK 0x200
+#define MMU_MMU_READ_RAM_Endianness_OFFSET 9
+
+#define MMU_MMU_READ_RAM_ElementSize_MASK 0x180
+#define MMU_MMU_READ_RAM_ElementSize_OFFSET 7
+
+#define MMU_MMU_READ_RAM_Mixed_MASK 0x40
+#define MMU_MMU_READ_RAM_Mixed_OFFSET 6
+
+#define MMU_MMU_EMU_FAULT_AD_EmuFaultAddress_MASK 0xffffffff
+#define MMU_MMU_EMU_FAULT_AD_EmuFaultAddress_OFFSET 0
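+
+/*
+ * Illustrative use of the IRQSTATUS masks (sketch only; "irqstatus" is a
+ * hypothetical raw register value):
+ *   tlb_miss = (irqstatus & MMU_MMU_IRQSTATUS_TLBMiss_MASK) >>
+ *              MMU_MMU_IRQSTATUS_TLBMiss_OFFSET;
+ */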
+
+#endif /* _MMU_ACC_INT_H */
+/* EOF */
+
diff --git a/arch/arm/plat-omap/include/syslink/MMURegAcM.h b/arch/arm/plat-omap/include/syslink/MMURegAcM.h
new file mode 100644
index 000000000000..c4944110789e
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/MMURegAcM.h
@@ -0,0 +1,434 @@
+/*
+ * MMURegAcM.h
+ *
+ * Notify driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MMU_REG_ACM_H
+#define _MMU_REG_ACM_H
+
+
+
+#include "GlobalTypes.h"
+#include "MMUAccInt.h"
+
+
+/*
+ * EXPORTED DEFINITIONS
+ */
+
+#if defined(USE_LEVEL_1_MACROS)
+
+
+#define MMUMMU_SYSCONFIGReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
+
+
+#define MMUMMU_SYSCONFIGWriteRegister32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
+ register u32 newValue = (value);\
+ WR_MEM_32_VOLATILE((base_address)+offset, newValue);\
+}
+
+#define MMUMMU_SYSCONFIGClockActivityGet32(var)\
+ ((u32)(((var) & MMU_MMU_SYSCONFIG_ClockActivity_MASK)\
+ >> MMU_MMU_SYSCONFIG_ClockActivity_OFFSET))
+
+#define mmu_sisconf_auto_idle_set32(var, value)\
+ ((((var) & ~(MMU_MMU_SYSCONFIG_AutoIdle_MASK)) |\
+ (((value) << MMU_MMU_SYSCONFIG_AutoIdle_OFFSET) &\
+ MMU_MMU_SYSCONFIG_AutoIdle_MASK)))
+
+#define MMUMMU_IRQSTATUSReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
+
+
+#define MMUMMU_IRQSTATUSWriteRegister32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
+ register u32 newValue = (value);\
+ WR_MEM_32_VOLATILE((base_address)+offset, newValue);\
+}
+
+
+#define MMUMMU_IRQENABLEReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_IRQENABLE_OFFSET))
+
+
+#define MMUMMU_IRQENABLEWriteRegister32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
+ register u32 newValue = (value);\
+ WR_MEM_32_VOLATILE((base_address)+offset, newValue);\
+}
+
+#define MMUMMU_IRQENABLETableWalkFaultSet32(var, value)\
+ ((((var) & ~(MMU_MMU_IRQENABLE_TableWalkFault_MASK)) |\
+ (((value) << MMU_MMU_IRQENABLE_TableWalkFault_OFFSET) &\
+ MMU_MMU_IRQENABLE_TableWalkFault_MASK)))
+
+#define MMUMMU_IRQENABLETranslationFaultRead32(base_address)\
+ ((((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_IRQENABLE_OFFSET)))) &\
+ MMU_MMU_IRQENABLE_TranslationFault_MASK) >>\
+ MMU_MMU_IRQENABLE_TranslationFault_OFFSET))
+
+
+
+#define MMUMMU_IRQENABLETranslationFaultSet32(var, value)\
+ ((((var) & ~(MMU_MMU_IRQENABLE_TranslationFault_MASK)) |\
+ (((value) << MMU_MMU_IRQENABLE_TranslationFault_OFFSET) &\
+ MMU_MMU_IRQENABLE_TranslationFault_MASK)))
+
+
+#define MMUMMU_IRQENABLETLBMissRead32(base_address)\
+ ((((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_IRQENABLE_OFFSET)))) &\
+ MMU_MMU_IRQENABLE_TLBMiss_MASK) >>\
+ MMU_MMU_IRQENABLE_TLBMiss_OFFSET))
+
+
+#define MMUMMU_IRQENABLETLBMissReadIsTrMissIntM32(base_address)\
+ ((MMUMMU_IRQENABLETLBMissTrMissIntM == (MMUMMU_IRQENABLETLBMissE)\
+ (((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_IRQENABLE_OFFSET)))) &\
+ MMU_MMU_IRQENABLE_TLBMiss_MASK) >>\
+ MMU_MMU_IRQENABLE_TLBMiss_OFFSET)))
+
+
+#define MMUMMU_IRQENABLETLBMissReadIsTrMissGInt32(base_address)\
+ ((MMUMMU_IRQENABLETLBMissTrMissGInt == (MMUMMU_IRQENABLETLBMissE)\
+ (((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_IRQENABLE_OFFSET)))) &\
+ MMU_MMU_IRQENABLE_TLBMiss_MASK) >>\
+ MMU_MMU_IRQENABLE_TLBMiss_OFFSET)))
+
+
+#define MMUMMU_IRQENABLETLBMissGet32(var)\
+ ((u32)(((var) & MMU_MMU_IRQENABLE_TLBMiss_MASK)\
+ >> MMU_MMU_IRQENABLE_TLBMiss_OFFSET))
+
+
+#define MMUMMU_IRQENABLETLBMissIsTrMissIntM32(var)\
+ ((MMUMMU_IRQENABLETLBMissTrMissIntM == \
+ (MMUMMU_IRQENABLETLBMissE)(((var) & MMU_MMU_IRQENABLE_TLBMiss_MASK) >>\
+ MMU_MMU_IRQENABLE_TLBMiss_OFFSET)))
+
+#define MMUMMU_IRQENABLETLBMissIsTrMissGInt32(var)\
+ ((MMUMMU_IRQENABLETLBMissTrMissGInt ==\
+ (MMUMMU_IRQENABLETLBMissE)(((var) & MMU_MMU_IRQENABLE_TLBMiss_MASK) >>\
+ MMU_MMU_IRQENABLE_TLBMiss_OFFSET)))
+
+#define MMUMMU_IRQENABLETLBMissWrite32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE((base_address)+offset);\
+ register u32 newValue = (value);\
+ data &= ~(MMU_MMU_IRQENABLE_TLBMiss_MASK);\
+ newValue <<= MMU_MMU_IRQENABLE_TLBMiss_OFFSET;\
+ newValue &= MMU_MMU_IRQENABLE_TLBMiss_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE(base_address+offset, newValue);\
+}
+
+
+#define MMUMMU_IRQENABLETLBMissWriteTrMissIntM32(base_address)\
+{\
+ const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
+ const u32 newValue = (u32)MMUMMU_IRQENABLETLBMissTrMissIntM <<\
+ MMU_MMU_IRQENABLE_TLBMiss_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE(base_address+offset);\
+ data &= ~(MMU_MMU_IRQENABLE_TLBMiss_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE(base_address+offset, data);\
+}
+
+
+#define MMUMMU_IRQENABLETLBMissWriteTrMissGInt32(base_address)\
+{\
+ const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
+ const u32 newValue = (u32)MMUMMU_IRQENABLETLBMissTrMissGInt <<\
+ MMU_MMU_IRQENABLE_TLBMiss_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE(base_address+offset);\
+ data &= ~(MMU_MMU_IRQENABLE_TLBMiss_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE(base_address+offset, data);\
+}
+
+
+#define MMUMMU_IRQENABLETLBMissSet32(var, value)\
+ ((((var) & ~(MMU_MMU_IRQENABLE_TLBMiss_MASK)) |\
+ (((value) << MMU_MMU_IRQENABLE_TLBMiss_OFFSET) &\
+ MMU_MMU_IRQENABLE_TLBMiss_MASK)))
+
+
+#define MMUMMU_WALKING_STTWLRunningRead32(base_address)\
+ ((((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_WALKING_ST_OFFSET)))) &\
+ MMU_MMU_WALKING_ST_TWLRunning_MASK) >>\
+ MMU_MMU_WALKING_ST_TWLRunning_OFFSET))
+
+
+
+#define MMUMMU_CNTLTWLEnableRead32(base_address)\
+ ((((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
+ MMU_MMU_CNTL_TWLEnable_MASK) >>\
+ MMU_MMU_CNTL_TWLEnable_OFFSET))
+
+
+#define MMUMMU_CNTLTWLEnableWrite32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_CNTL_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE((base_address)+offset);\
+ register u32 newValue = (value);\
+ data &= ~(MMU_MMU_CNTL_TWLEnable_MASK);\
+ newValue <<= MMU_MMU_CNTL_TWLEnable_OFFSET;\
+ newValue &= MMU_MMU_CNTL_TWLEnable_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE(base_address+offset, newValue);\
+}
+
+
+#define MMUMMU_CNTLMMUEnableWrite32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_CNTL_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE((base_address)+offset);\
+ register u32 newValue = (value);\
+ data &= ~(MMU_MMU_CNTL_MMUEnable_MASK);\
+ newValue <<= MMU_MMU_CNTL_MMUEnable_OFFSET;\
+ newValue &= MMU_MMU_CNTL_MMUEnable_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE(base_address+offset, newValue);\
+}
+
+
+#define MMUMMU_FAULT_ADReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_FAULT_AD_OFFSET))
+
+
+#define MMUMMU_FAULT_ADFaultAddressRead32(base_address)\
+ ((((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_FAULT_AD_OFFSET)))) &\
+ MMU_MMU_FAULT_AD_FaultAddress_MASK) >>\
+ MMU_MMU_FAULT_AD_FaultAddress_OFFSET))
+
+#define MMUMMU_FAULT_ADFaultAddressGet32(var)\
+ ((u32)(((var) & MMU_MMU_FAULT_AD_FaultAddress_MASK)\
+ >> MMU_MMU_FAULT_AD_FaultAddress_OFFSET))
+
+
+#define MMUMMU_TTBReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_TTB_OFFSET))
+
+#define MMUMMU_TTBWriteRegister32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_TTB_OFFSET;\
+ register u32 newValue = (value);\
+ WR_MEM_32_VOLATILE((base_address)+offset, newValue);\
+}
+
+#define MMUMMU_TTBTTBAddressRead32(base_address)\
+ ((((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_TTB_OFFSET)))) &\
+ MMU_MMU_TTB_TTBAddress_MASK) >>\
+ MMU_MMU_TTB_TTBAddress_OFFSET))
+
+#define MMUMMU_TTBTTBAddressGet32(var)\
+ ((u32)(((var) & MMU_MMU_TTB_TTBAddress_MASK)\
+ >> MMU_MMU_TTB_TTBAddress_OFFSET))
+
+
+#define MMUMMU_TTBTTBAddressWrite32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_TTB_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE((base_address)+offset);\
+ register u32 newValue = (value);\
+ data &= ~(MMU_MMU_TTB_TTBAddress_MASK);\
+ newValue <<= MMU_MMU_TTB_TTBAddress_OFFSET;\
+ newValue &= MMU_MMU_TTB_TTBAddress_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE(base_address+offset, newValue);\
+}
+
+#define MMUMMU_TTBTTBAddressSet32(var, value)\
+ ((((var) & ~(MMU_MMU_TTB_TTBAddress_MASK)) |\
+ (((value) << MMU_MMU_TTB_TTBAddress_OFFSET) &\
+ MMU_MMU_TTB_TTBAddress_MASK)))
+
+
+#define mmu_lckread_reg_32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_LOCK_OFFSET))
+
+#define mmu_lck_write_reg32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_LOCK_OFFSET;\
+ register u32 newValue = (value);\
+ WR_MEM_32_VOLATILE((base_address)+offset, newValue);\
+}
+
+
+#define MMUMMU_LOCKBaseValueRead32(base_address)\
+ ((((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
+ MMU_MMU_LOCK_BaseValue_MASK) >>\
+ MMU_MMU_LOCK_BaseValue_OFFSET))
+#define MMUMMU_LOCKBaseValueGet32(var)\
+ ((u32)(((var) & MMU_MMU_LOCK_BaseValue_MASK)\
+ >> MMU_MMU_LOCK_BaseValue_OFFSET))
+
+
+#define MMUMMU_LOCKBaseValueWrite32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_LOCK_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE((base_address)+offset);\
+ register u32 newValue = (value);\
+ data &= ~(MMU_MMU_LOCK_BaseValue_MASK);\
+ newValue <<= MMU_MMU_LOCK_BaseValue_OFFSET;\
+ newValue &= MMU_MMU_LOCK_BaseValue_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE(base_address+offset, newValue);\
+}
+
+
+#define MMUMMU_LOCKBaseValueSet32(var, value)\
+ ((((var) & ~(MMU_MMU_LOCK_BaseValue_MASK)) |\
+ (((value) << MMU_MMU_LOCK_BaseValue_OFFSET) &\
+ MMU_MMU_LOCK_BaseValue_MASK)))
+
+#define MMUMMU_LOCKCurrentVictimRead32(base_address)\
+ ((((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
+ MMU_MMU_LOCK_CurrentVictim_MASK) >>\
+ MMU_MMU_LOCK_CurrentVictim_OFFSET))
+
+
+#define MMUMMU_LOCKCurrentVictimGet32(var)\
+ ((u32)(((var) & MMU_MMU_LOCK_CurrentVictim_MASK)\
+ >> MMU_MMU_LOCK_CurrentVictim_OFFSET))
+
+
+#define mmu_lck_crnt_vctmwite32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_LOCK_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE((base_address)+offset);\
+ register u32 newValue = (value);\
+ data &= ~(MMU_MMU_LOCK_CurrentVictim_MASK);\
+ newValue <<= MMU_MMU_LOCK_CurrentVictim_OFFSET;\
+ newValue &= MMU_MMU_LOCK_CurrentVictim_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE(base_address+offset, newValue);\
+}
+
+
+#define MMUMMU_LOCKCurrentVictimSet32(var, value)\
+ ((((var) & ~(MMU_MMU_LOCK_CurrentVictim_MASK)) |\
+ (((value) << MMU_MMU_LOCK_CurrentVictim_OFFSET) &\
+ MMU_MMU_LOCK_CurrentVictim_MASK)))
+
+
+#define MMUMMU_LD_TLBReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_LD_TLB_OFFSET))
+
+#define mmu_ld_tlbwrt_reg32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
+ register u32 newValue = (value);\
+ WR_MEM_32_VOLATILE((base_address)+offset, newValue);\
+}
+
+#define MMUMMU_LD_TLBLdTLBItemRead32(base_address)\
+ ((((RD_MEM_32_VOLATILE(((base_address)+(MMU_MMU_LD_TLB_OFFSET)))) &\
+ MMU_MMU_LD_TLB_LdTLBItem_MASK) >>\
+ MMU_MMU_LD_TLB_LdTLBItem_OFFSET))
+
+
+#define MMUMMU_CAMReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_READ_CAM_OFFSET))
+
+
+#define MMUMMU_CAMWriteRegister32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_CAM_OFFSET;\
+ register u32 newValue = (value);\
+ WR_MEM_32_VOLATILE((base_address)+offset, newValue);\
+}
+
+#define MMUMMU_RAMReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_READ_RAM_OFFSET))
+
+
+#define MMUMMU_RAMWriteRegister32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_RAM_OFFSET;\
+ register u32 newValue = (value);\
+ WR_MEM_32_VOLATILE((base_address)+offset, newValue);\
+}
+
+#define MMUMMU_GFLUSHGlobalFlushWrite32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_GFLUSH_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE((base_address)+offset);\
+ register u32 newValue = (value);\
+ data &= ~(MMU_MMU_GFLUSH_GlobalFlush_MASK);\
+ newValue <<= MMU_MMU_GFLUSH_GlobalFlush_OFFSET;\
+ newValue &= MMU_MMU_GFLUSH_GlobalFlush_MASK;\
+ newValue |= data;\
+ WR_MEM_32_VOLATILE(base_address+offset, newValue);\
+}
+
+#define MMUMMU_GFLUSHGlobalFlushWritenft_w32(base_address)\
+{\
+ const u32 offset = MMU_MMU_GFLUSH_OFFSET;\
+ const u32 newValue = (u32)MMUMMU_GFLUSHGlobalFlushnft_w <<\
+ MMU_MMU_GFLUSH_GlobalFlush_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE(base_address+offset);\
+ data &= ~(MMU_MMU_GFLUSH_GlobalFlush_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE(base_address+offset, data);\
+}
+
+#define MMUMMU_GFLUSHGlobalFlushWriteflush_w32(base_address)\
+{\
+ const u32 offset = MMU_MMU_GFLUSH_OFFSET;\
+ const u32 newValue = (u32)MMUMMU_GFLUSHGlobalFlushflush_w <<\
+ MMU_MMU_GFLUSH_GlobalFlush_OFFSET;\
+ register u32 data = RD_MEM_32_VOLATILE(base_address+offset);\
+ data &= ~(MMU_MMU_GFLUSH_GlobalFlush_MASK);\
+ data |= newValue;\
+ WR_MEM_32_VOLATILE(base_address+offset, data);\
+}
+
+
+#define MMUMMU_GFLUSHGlobalFlushSet32(var, value)\
+ ((((var) & ~(MMU_MMU_GFLUSH_GlobalFlush_MASK)) |\
+ (((value) << MMU_MMU_GFLUSH_GlobalFlush_OFFSET) &\
+ MMU_MMU_GFLUSH_GlobalFlush_MASK)))
+
+#define MMUMMU_FLUSH_ENTRYReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_FLUSH_ENTRY_OFFSET))
+
+
+#define MMUMMU_FLUSH_ENTRYWriteRegister32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
+ register u32 newValue = (value);\
+ WR_MEM_32_VOLATILE((base_address)+offset, newValue);\
+}
+
+#define MMUMMU_FAULT_PCReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_FAULT_PC_OFFSET))
+
+#define MMUMMU_FAULT_STATUSReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_FAULT_STATUS_OFFSET))
+
+#define MMUMMU_FAULT_EMUAddressReadRegister32(base_address)\
+ (RD_MEM_32_VOLATILE((base_address)+MMU_MMU_EMU_FAULT_AD_OFFSET))
+
+#endif /* USE_LEVEL_1_MACROS */
+
+#endif /* _MMU_REG_ACM_H */
+/* EOF */
+
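The level-1 accessors above are thin read-modify-write wrappers over RD_MEM_32_VOLATILE()/WR_MEM_32_VOLATILE(). A minimal sketch of how a fault handler might use them, assuming the header is reachable as <syslink/MMURegAcM.h> and that mmu_base is an already-ioremap()'d MMU base address (both assumptions, not part of the patch):

/* Sketch only: mmu_base must be an ioremap()'d MMU base, not a physical
 * address; the include path is assumed from this patch's layout. */
#include <linux/kernel.h>
#include <syslink/MMURegAcM.h>

static void sketch_mmu_flush_and_dump(u32 mmu_base)
{
        /* Request a global TLB flush (sets the GlobalFlush bit). */
        MMUMMU_GFLUSHGlobalFlushWrite32(mmu_base, 1);

        /* The fault registers are read-only status snapshots. */
        pr_info("MMU fault PC 0x%08x, status 0x%08x\n",
                MMUMMU_FAULT_PCReadRegister32(mmu_base),
                MMUMMU_FAULT_STATUSReadRegister32(mmu_base));
}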
diff --git a/arch/arm/plat-omap/include/syslink/_sysmgr.h b/arch/arm/plat-omap/include/syslink/_sysmgr.h
new file mode 100644
index 000000000000..58fbdd378155
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/_sysmgr.h
@@ -0,0 +1,50 @@
+/*
+ * _sysmgr.h
+ *
+ * Defines for system manager functions
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef __SYSMGR_H_
+#define __SYSMGR_H_
+
+/* Structure to retrieve the scalability proc info from the slave */
+struct sysmgr_proc_config {
+ u32 proc_id;
+ u32 use_notify;
+ u32 use_messageq;
+ u32 use_heapbuf;
+ u32 use_frameq;
+ u32 use_ringio;
+ u32 use_listmp;
+ u32 use_nameserver;
+ u32 boot_mode;
+};
+
+/* Function to set the boot load page address for a slave */
+void sysmgr_set_boot_load_page(u16 proc_id, u32 boot_load_page);
+
+/* Function to get configuration values for a host object (component/instance) */
+u32 sysmgr_get_object_config(u16 proc_id, void *config, u32 cmd_id, u32 size);
+
+/* Function to put configuration values for a slave object (component/instance) */
+u32 sysmgr_put_object_config(u16 proc_id, void *config, u32 cmd_id, u32 size);
+
+/* Function to wait for scalability handshake value. */
+void sysmgr_wait_for_scalability_info(u16 proc_id);
+
+/* Function to wait for slave to complete setup */
+void sysmgr_wait_for_slave_setup(u16 proc_id);
+
+
+#endif /* ifndef __SYSMGR_H_ */
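A minimal usage sketch for the calls above, assuming the header is included as <syslink/_sysmgr.h>; the command id is a placeholder, since the real SysMgr command ids are defined elsewhere in the stack:

/* Sketch only: SKETCH_CMD_SCALABILITY is a placeholder command id. */
#include <linux/types.h>
#include <syslink/_sysmgr.h>

#define SKETCH_CMD_SCALABILITY 0x1

static void sketch_query_slave(u16 proc_id)
{
        struct sysmgr_proc_config cfg = { 0 };

        /* Block until the slave publishes its scalability handshake ... */
        sysmgr_wait_for_scalability_info(proc_id);

        /* ... then pull the per-processor configuration across. */
        sysmgr_get_object_config(proc_id, &cfg, SKETCH_CMD_SCALABILITY,
                                 sizeof(cfg));
}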
diff --git a/arch/arm/plat-omap/include/syslink/atomic_linux.h b/arch/arm/plat-omap/include/syslink/atomic_linux.h
new file mode 100644
index 000000000000..76fd8a1b9dc5
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/atomic_linux.h
@@ -0,0 +1,105 @@
+/*
+ * atomic_linux.h
+ *
+ * Atomic operations functions
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _ATOMIC_LINUX_H
+#define _ATOMIC_LINUX_H
+
+#include <linux/types.h>
+#include <generated/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+/*
+ * ======== atomic_cmpmask_and_set ========
+ * Purpose:
+ * This will compare a mask and set if not equal
+ */
+static inline void atomic_cmpmask_and_set(atomic_t *v, u32 mask, u32 val)
+{
+ s32 ret;
+ unsigned long flags;
+ atomic_t *atm = v;
+
+ raw_local_irq_save(flags);
+ ret = atm->counter;
+ if (likely(((ret & mask) != mask)))
+ atm->counter = val;
+ raw_local_irq_restore(flags);
+}
+
+/*
+ * ======== atomic_cmpmask_and_lt ========
+ * Purpose:
+ * This will compare a mask and then check whether the current value is
+ * less than the provided value.
+ */
+static inline bool atomic_cmpmask_and_lt(atomic_t *v, u32 mask, u32 val)
+{
+ bool ret = true;
+ atomic_t *atm = v;
+ s32 cur;
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ cur = atm->counter;
+ /* Compare mask, if matches then compare val */
+ if (likely(((cur & mask) == mask))) {
+ if (likely(cur >= val))
+ ret = false;
+ }
+ raw_local_irq_restore(flags);
+
+ /* retval = true if mask matches and current value is less than given
+ * value */
+ /* retval = false if either mask does not match or current value is
+ * not less than given value */
+ return ret;
+}
+
+
+/*
+ * ======== atomic_cmpmask_and_gt ========
+ * Purpose:
+ * This will compare a mask and then check whether the current value is
+ * greater than the provided value.
+ */
+static inline bool atomic_cmpmask_and_gt(atomic_t *v, u32 mask, u32 val)
+{
+ bool ret = false;
+ atomic_t *atm = v;
+ s32 cur;
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ cur = atm->counter;
+ /* Compare mask, if matches then compare val */
+ if (likely(((cur & mask) == mask))) {
+ if (likely(cur > val))
+ ret = true;
+ }
+
+ raw_local_irq_restore(flags);
+ /* retval = true if mask matches and current value is greater than
+ * given value */
+ /* retval = false if either mask does not match or current value is
+ * not greater than given value */
+ return ret;
+}
+
+#endif /* if !defined(_ATOMIC_LINUX_H) */
+
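These helpers are commonly paired with a "module initialized" magic mask so that a single atomic_t doubles as an init flag and a reference count; a sketch under that assumption (the mask value and the helper names are illustrative):

/* Sketch only: the 0x80000000 "initialized" mask is an illustrative choice. */
#include <asm/atomic.h>
#include <syslink/atomic_linux.h>

#define SKETCH_INIT_MASK 0x80000000u

static atomic_t sketch_refcount;

static void sketch_module_get(void)
{
        /* First caller stamps the mask; counter becomes SKETCH_INIT_MASK. */
        atomic_cmpmask_and_set(&sketch_refcount, SKETCH_INIT_MASK,
                               SKETCH_INIT_MASK);
        atomic_inc(&sketch_refcount);
}

static bool sketch_module_unused(void)
{
        /* True while no reference has been taken below the mask bit. */
        return atomic_cmpmask_and_lt(&sketch_refcount, SKETCH_INIT_MASK,
                                     SKETCH_INIT_MASK + 1);
}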
diff --git a/arch/arm/plat-omap/include/syslink/ducatienabler.h b/arch/arm/plat-omap/include/syslink/ducatienabler.h
new file mode 100644
index 000000000000..f2f8023ce8b4
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/ducatienabler.h
@@ -0,0 +1,291 @@
+/*
+ * ducatienabler.h
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#ifndef _DDUCATIMMU_ENABLER_H_
+#define _DDUCATIMMU_ENABLER_H_
+
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <syslink/hw_defs.h>
+#include <syslink/hw_mmu.h>
+
+
+#define PAGE_SIZE_4KB 0x1000
+#define PAGE_SIZE_64KB 0x10000
+#define PAGE_SIZE_1MB 0x100000
+#define PAGE_SIZE_16MB 0x1000000
+
+/* Define the Peripheral PAs and their Ducati VAs. */
+#define L4_PERIPHERAL_MBOX 0x4A0F4000
+#define DUCATI_PERIPHERAL_MBOX 0xAA0F4000
+
+#define L4_PERIPHERAL_I2C1 0x48070000
+#define DUCATI_PERIPHERAL_I2C1 0xA8070000
+#define L4_PERIPHERAL_I2C2 0x48072000
+#define DUCATI_PERIPHERAL_I2C2 0xA8072000
+#define L4_PERIPHERAL_I2C3 0x48060000
+#define DUCATI_PERIPHERAL_I2C3 0xA8060000
+
+#define L4_PERIPHERAL_DMA 0x4A056000
+#define DUCATI_PERIPHERAL_DMA 0xAA056000
+
+#define L4_PERIPHERAL_GPIO1 0x4A310000
+#define DUCATI_PERIPHERAL_GPIO1 0xAA310000
+#define L4_PERIPHERAL_GPIO2 0x48055000
+#define DUCATI_PERIPHERAL_GPIO2 0xA8055000
+#define L4_PERIPHERAL_GPIO3 0x48057000
+#define DUCATI_PERIPHERAL_GPIO3 0xA8057000
+
+#define L4_PERIPHERAL_GPTIMER3 0x48034000
+#define DUCATI_PERIPHERAL_GPTIMER3 0xA8034000
+#define L4_PERIPHERAL_GPTIMER4 0x48036000
+#define DUCATI_PERIPHERAL_GPTIMER4 0xA8036000
+#define L4_PERIPHERAL_GPTIMER9 0x48040000
+#define DUCATI_PERIPHERAL_GPTIMER9 0xA8040000
+#define L4_PERIPHERAL_GPTIMER11 0x48088000
+#define DUCATI_PERIPHERAL_GPTIMER11 0xA8088000
+
+#define L4_PERIPHERAL_UART1 0x4806A000
+#define DUCATI_PERIPHERAL_UART1 0xA806A000
+#define L4_PERIPHERAL_UART2 0x4806C000
+#define DUCATI_PERIPHERAL_UART2 0xA806C000
+#define L4_PERIPHERAL_UART3 0x48020000
+#define DUCATI_PERIPHERAL_UART3 0xA8020000
+#define L4_PERIPHERAL_UART4 0x4806E000
+#define DUCATI_PERIPHERAL_UART4 0xA806E000
+
+
+#define L3_TILER_VIEW0_ADDR 0x60000000
+#define DUCATIVA_TILER_VIEW0_ADDR 0x60000000
+#define DUCATIVA_TILER_VIEW0_LEN 0x20000000
+
+
+
+#if 0 /* Original definitions for OMAP4430. */
+/* Define the various Ducati Memory Regions. */
+/* The first 4K page of BOOTVECS is programmed as a TLB entry. The remaining */
+/* three pages are not used and are mapped to minimize number of PTEs */
+#define DUCATI_BOOTVECS_ADDR 0x1000
+#define DUCATI_BOOTVECS_LEN 0x3000
+
+#define DUCATI_EXTMEM_SYSM3_ADDR 0x4000
+#define DUCATI_EXTMEM_SYSM3_LEN 0x1FC000
+
+#define DUCATI_EXTMEM_APPM3_ADDR 0x10000000
+#define DUCATI_EXTMEM_APPM3_LEN 0x200000
+
+#define DUCATI_PRIVATE_SYSM3_DATA_ADDR 0x84000000
+#define DUCATI_PRIVATE_SYSM3_DATA_LEN 0x200000
+
+#define DUCATI_PRIVATE_APPM3_DATA_ADDR 0x8A000000
+#define DUCATI_PRIVATE_APPM3_DATA_LEN 0x200000
+
+#define DUCATI_SHARED_M3_DATA_ADDR 0x90000000
+#define DUCATI_SHARED_M3_DATA_LEN 0x100000
+
+#define DUCATI_SHARED_IPC_ADDR 0x98000000
+#define DUCATI_SHARED_IPC_LEN 0x100000
+
+#define DUCATI_SW_DMM_ADDR 0x80000000
+#define DUCATI_SW_DMM_LEN 0x400000
+#endif
+
+/* OMAP4430 SDC definitions */
+#define L4_PERIPHERAL_L4CFG 0x4A000000
+#define DUCATI_PERIPHERAL_L4CFG 0xAA000000
+
+#define L4_PERIPHERAL_L4PER 0x48000000
+#define DUCATI_PERIPHERAL_L4PER 0xA8000000
+
+#define L3_IVAHD_CONFIG 0x5A000000
+#define DUCATI_IVAHD_CONFIG 0xBA000000
+
+#define L3_IVAHD_SL2 0x5B000000
+#define DUCATI_IVAHD_SL2 0xBB000000
+
+#define L3_TILER_MODE0_1_ADDR 0x60000000
+#define DUCATI_TILER_MODE0_1_ADDR 0x60000000
+#define DUCATI_TILER_MODE0_1_LEN 0x10000000
+
+#define L3_TILER_MODE3_ADDR 0x78000000
+#define DUCATI_TILER_MODE3_ADDR 0x78000000
+#define DUCATI_TILER_MODE3_LEN 0x8000000
+
+#define DUCATI_BOOTVECS_UNUSED_ADDR 0x1000
+#define DUCATI_BOOTVECS_UNUSED_LEN 0x3000
+
+#define DUCATI_MEM_CODE_SYSM3_ADDR 0x4000
+#define DUCATI_MEM_CODE_SYSM3_LEN 0x1FC000
+
+#define DUCATI_MEM_CODE_APPM3_ADDR 0x800000
+#define DUCATI_MEM_CODE_APPM3_LEN 0x200000
+
+#define DUCATI_MEM_CONST_SYSM3_ADDR 0x80000000
+#define DUCATI_MEM_CONST_SYSM3_LEN 0x100000
+
+#define DUCATI_MEM_CONST_APPM3_ADDR 0x80100000
+#define DUCATI_MEM_CONST_APPM3_LEN 0x100000
+
+#define DUCATI_MEM_HEAP_SYSM3_ADDR 0x80200000
+#define DUCATI_MEM_HEAP_SYSM3_LEN 0x100000
+
+#define DUCATI_MEM_HEAP_APPM3_ADDR 0x80300000
+#define DUCATI_MEM_HEAP_APPM3_LEN 0x1000000
+
+#define DUCATI_MEM_MPU_DUCATI_SHMEM_ADDR 0x81300000
+#define DUCATI_MEM_MPU_DUCATI_SHMEM_LEN 0xC00000
+
+#define DUCATI_MEM_IPC_SHMEM_ADDR 0x81F00000
+#define DUCATI_MEM_IPC_SHMEM_LEN 0x100000
+
+#define DUCATI_MEM_IPC_HEAP0_ADDR 0xA0000000
+#define DUCATI_MEM_IPC_HEAP0_LEN 0x55000
+
+#define DUCATI_MEM_IPC_HEAP1_ADDR 0xA0055000
+#define DUCATI_MEM_IPC_HEAP1_LEN 0x55000
+
+#define DUCATI_MEM_IPC_HEAP2_ADDR 0xA00AA000
+#define DUCATI_MEM_IPC_HEAP2_LEN 0x56000
+
+
+/* Types of mapping attributes */
+
+/* MPU address is virtual and needs to be translated to physical addr */
+#define DSP_MAPVIRTUALADDR 0x00000000
+#define DSP_MAPPHYSICALADDR 0x00000001
+
+/* Mapped data is big endian */
+#define DSP_MAPBIGENDIAN 0x00000002
+#define DSP_MAPLITTLEENDIAN 0x00000000
+
+/* Element size is based on DSP r/w access size */
+#define DSP_MAPMIXEDELEMSIZE 0x00000004
+
+/*
+ * Element size for MMU mapping (8, 16, 32, or 64 bit)
+ * Ignored if DSP_MAPMIXEDELEMSIZE enabled
+ */
+#define DSP_MAPELEMSIZE8 0x00000008
+#define DSP_MAPELEMSIZE16 0x00000010
+#define DSP_MAPELEMSIZE32 0x00000020
+#define DSP_MAPELEMSIZE64 0x00000040
+
+#define DSP_MAPVMALLOCADDR 0x00000080
+#define DSP_MAPTILERADDR 0x00000100
+
+
+#define PG_MASK(pg_size) (~((pg_size)-1))
+#define PG_ALIGN_LOW(addr, pg_size) ((addr) & PG_MASK(pg_size))
+#define PG_ALIGN_HIGH(addr, pg_size) (((addr)+(pg_size)-1) & PG_MASK(pg_size))
+
+
+struct mmu_entry {
+ u32 ul_phy_addr;
+ u32 ul_virt_addr;
+ u32 ul_size;
+};
+
+struct memory_entry {
+ u32 ul_virt_addr;
+ u32 ul_size;
+};
+
+#if 0 /* Original definitions for OMAP4430. */
+static const struct mmu_entry l4_map[] = {
+ /* Mailbox 4KB*/
+ {L4_PERIPHERAL_MBOX, DUCATI_PERIPHERAL_MBOX, HW_PAGE_SIZE_4KB},
+ /* I2C 4KB each */
+ {L4_PERIPHERAL_I2C1, DUCATI_PERIPHERAL_I2C1, HW_PAGE_SIZE_4KB},
+ {L4_PERIPHERAL_I2C2, DUCATI_PERIPHERAL_I2C2, HW_PAGE_SIZE_4KB},
+ {L4_PERIPHERAL_I2C3, DUCATI_PERIPHERAL_I2C3, HW_PAGE_SIZE_4KB},
+ /* DMA 4KB */
+ {L4_PERIPHERAL_DMA, DUCATI_PERIPHERAL_DMA, HW_PAGE_SIZE_4KB},
+ /* GPIO Banks 4KB each */
+ {L4_PERIPHERAL_GPIO1, DUCATI_PERIPHERAL_GPIO1, HW_PAGE_SIZE_4KB},
+ {L4_PERIPHERAL_GPIO2, DUCATI_PERIPHERAL_GPIO2, HW_PAGE_SIZE_4KB},
+ {L4_PERIPHERAL_GPIO3, DUCATI_PERIPHERAL_GPIO3, HW_PAGE_SIZE_4KB},
+ /* GPTimers 4KB each */
+ {L4_PERIPHERAL_GPTIMER3, DUCATI_PERIPHERAL_GPTIMER3, HW_PAGE_SIZE_4KB},
+ {L4_PERIPHERAL_GPTIMER4, DUCATI_PERIPHERAL_GPTIMER4, HW_PAGE_SIZE_4KB},
+ {L4_PERIPHERAL_GPTIMER9, DUCATI_PERIPHERAL_GPTIMER9, HW_PAGE_SIZE_4KB},
+ {L4_PERIPHERAL_GPTIMER11, DUCATI_PERIPHERAL_GPTIMER11,
+ HW_PAGE_SIZE_4KB},
+ /* UARTs 4KB each */
+ {L4_PERIPHERAL_UART1, DUCATI_PERIPHERAL_UART1, HW_PAGE_SIZE_4KB},
+ {L4_PERIPHERAL_UART2, DUCATI_PERIPHERAL_UART2, HW_PAGE_SIZE_4KB},
+ {L4_PERIPHERAL_UART3, DUCATI_PERIPHERAL_UART3, HW_PAGE_SIZE_4KB},
+ {L4_PERIPHERAL_UART4, DUCATI_PERIPHERAL_UART4,
+ HW_PAGE_SIZE_4KB},
+};
+
+static const struct memory_entry l3_memory_regions[] = {
+ /* BootVecs regions */
+ {0, (PAGE_SIZE_1MB * 2)},
+ /* EXTMEM_CORE1: 0x10000000 to 0x100FFFFF */
+ {DUCATI_EXTMEM_APPM3_ADDR, DUCATI_EXTMEM_APPM3_LEN},
+ /* PRIVATE_SYSM3_DATA*/
+ {DUCATI_PRIVATE_SYSM3_DATA_ADDR, DUCATI_PRIVATE_SYSM3_DATA_LEN},
+ /* PRIVATE_APPM3_DATA*/
+ {DUCATI_PRIVATE_APPM3_DATA_ADDR, DUCATI_PRIVATE_APPM3_DATA_LEN},
+ /* SHARED_M3_DATA*/
+ {DUCATI_SHARED_M3_DATA_ADDR, DUCATI_SHARED_M3_DATA_LEN},
+ /* IPC*/
+ {DUCATI_SHARED_IPC_ADDR, DUCATI_SHARED_IPC_LEN},
+ /* DMM*/
+ {DUCATI_SW_DMM_ADDR, DUCATI_SW_DMM_LEN},
+};
+#endif
+
+/* OMAP4430 SDC definitions */
+static const struct mmu_entry l4_map[] = {
+ /* TILER 8-bit and 16-bit modes */
+ {L3_TILER_MODE0_1_ADDR, DUCATI_TILER_MODE0_1_ADDR,
+ (HW_PAGE_SIZE_16MB * 16)},
+ /* TILER: Pages-mode */
+ {L3_TILER_MODE3_ADDR, DUCATI_TILER_MODE3_ADDR,
+ (HW_PAGE_SIZE_16MB * 8)},
+ /* L4_CFG: Covers all modules in L4_CFG 16MB*/
+ {L4_PERIPHERAL_L4CFG, DUCATI_PERIPHERAL_L4CFG, HW_PAGE_SIZE_16MB},
+ /* L4_PER: Covers all modules in L4_PER 16MB*/
+ {L4_PERIPHERAL_L4PER, DUCATI_PERIPHERAL_L4PER, HW_PAGE_SIZE_16MB},
+ /* IVA_HD Config: Covers all modules in IVA_HD Config space 16MB */
+ {L3_IVAHD_CONFIG, DUCATI_IVAHD_CONFIG, HW_PAGE_SIZE_16MB},
+ /* IVA_HD SL2: Covers all memory in IVA_HD SL2 space 16MB */
+ {L3_IVAHD_SL2, DUCATI_IVAHD_SL2, HW_PAGE_SIZE_16MB},
+};
+
+static const struct memory_entry l3_memory_regions[] = {
+ /* MEM_IPC_HEAP0, MEM_IPC_HEAP1, MEM_IPC_HEAP2 */
+ {DUCATI_MEM_IPC_HEAP0_ADDR, PAGE_SIZE_1MB},
+ /* MEM_INTVECS_SYSM3, MEM_INTVECS_APPM3, MEM_CODE_SYSM3,
+ MEM_CODE_APPM3 */
+ {0, PAGE_SIZE_16MB},
+ /* MEM_CONST_SYSM3, MEM_CONST_APPM3, MEM_HEAP_SYSM3, MEM_HEAP_APPM3,
+ MEM_MPU_DUCATI_SHMEM, MEM_IPC_SHMEM */
+ {DUCATI_MEM_CONST_SYSM3_ADDR, (PAGE_SIZE_16MB * 2)},
+};
+
+
+void dbg_print_ptes(bool ashow_inv_entries, bool ashow_repeat_entries);
+int ducati_setup(void);
+void ducati_destroy(void);
+u32 get_ducati_virt_mem(void);
+void unmap_ducati_virt_mem(u32 shm_virt_addr);
+int ducati_mem_map(u32 va, u32 da, u32 num_bytes, u32 map_attr);
+int ducati_mem_unmap(u32 da, u32 num_bytes);
+u32 user_va2pa(struct mm_struct *mm, u32 address);
+inline u32 ducati_mem_virtToPhys(u32 da);
+#endif /* _DDUCATIMMU_ENABLER_H_*/
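A sketch of how the alignment helpers and DSP_MAP* attribute flags above might be combined before calling ducati_mem_map(); the addresses and length are caller-supplied placeholders, and the include path is assumed from this patch's layout:

/* Sketch only: pa/da/len are placeholders for a real carve-out region. */
#include <linux/types.h>
#include <syslink/ducatienabler.h>

static int sketch_map_region(u32 pa, u32 da, u32 len)
{
        /* Round the region out to whole 4 KB pages before mapping. */
        u32 start = PG_ALIGN_LOW(pa, PAGE_SIZE_4KB);
        u32 end = PG_ALIGN_HIGH(pa + len, PAGE_SIZE_4KB);

        /* Physical, little-endian source with 32-bit element accesses. */
        u32 attrs = DSP_MAPPHYSICALADDR | DSP_MAPLITTLEENDIAN |
                        DSP_MAPELEMSIZE32;

        return ducati_mem_map(start, da, end - start, attrs);
}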
diff --git a/arch/arm/plat-omap/include/syslink/gate_remote.h b/arch/arm/plat-omap/include/syslink/gate_remote.h
new file mode 100644
index 000000000000..e8115d59535a
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/gate_remote.h
@@ -0,0 +1,34 @@
+/*
+ * gate_remote.h
+ *
+ * This includes the functions to handle remote gates
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _GATE_REMOTE_H_
+#define _GATE_REMOTE_H_
+
+#include <linux/types.h>
+
+/*
+ * This function is used to enter into a remote gate
+ */
+int gate_remote_enter(void *ghandle, u32 key);
+
+/*
+ * This function is used to leave a remote gate
+ */
+int gate_remote_leave(void *ghandle, u32 key);
+
+#endif /* _GATE_REMOTE_H_ */
+
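A minimal sketch of the intended enter/leave pairing; how the gate handle and key are obtained is outside this header, and return-code handling is omitted:

/* Sketch only: ghandle/key come from gate creation code not shown here. */
#include <syslink/gate_remote.h>

static void sketch_remote_critical_section(void *ghandle, u32 key)
{
        gate_remote_enter(ghandle, key);
        /* ... access the state protected by the remote gate ... */
        gate_remote_leave(ghandle, key);
}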
diff --git a/arch/arm/plat-omap/include/syslink/gatepeterson.h b/arch/arm/plat-omap/include/syslink/gatepeterson.h
new file mode 100644
index 000000000000..f2e3f78bf146
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/gatepeterson.h
@@ -0,0 +1,167 @@
+/*
+ * gatepeterson.h
+ *
+ * The Gate Peterson Algorithm for mutual exclusion of shared memory.
+ * Current implementation works for 2 processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _GATEPETERSON_H_
+#define _GATEPETERSON_H_
+
+#include <linux/types.h>
+
+/*
+ * GATEPETERSON_MODULEID
+ * Unique module ID
+ */
+#define GATEPETERSON_MODULEID (0xF415)
+
+/*
+ * A set of context protection levels that each correspond to
+ * single processor gates used for local protection
+ */
+enum gatepeterson_protect {
+ GATEPETERSON_PROTECT_DEFAULT = 0,
+ GATEPETERSON_PROTECT_NONE = 1,
+ GATEPETERSON_PROTECT_INTERRUPT = 2,
+ GATEPETERSON_PROTECT_TASKLET = 3,
+ GATEPETERSON_PROTECT_THREAD = 4,
+ GATEPETERSON_PROTECT_PROCESS = 5,
+ GATEPETERSON_PROTECT_END_VALUE = 6
+};
+
+/*
+ * Structure defining config parameters for the Gate Peterson
+ * module
+ */
+struct gatepeterson_config {
+ enum gatepeterson_protect default_protection;
+ /*!< Default module-wide local context protection level. The level of
+ * protection specified here determines which local gate is created per
+ * GatePeterson instance for local protection during create. The instance
+ * configuration parameter may be used to override this module setting per
+ * instance. The configuration used here should reflect both the context
+ * in which enter and leave are to be called, as well as the maximum level
+ * of protection needed locally.
+ */
+ u32 max_name_len; /* GP name len */
+ bool use_nameserver;
+ /*!< Whether to have this module use the NameServer or not. If the
+ * NameServer is not needed, set this configuration parameter to false.
+ * This informs GatePeterson not to pull in the NameServer module.
+ * In this case, all names passed into create and open are ignored.
+ */
+};
+
+/*
+ * Structure defining config parameters for the Gate Peterson
+ * instances
+ */
+struct gatepeterson_params {
+ void *shared_addr;
+ /* Address of the shared memory. The creator must supply a cache-aligned
+ * address in shared memory that will be used to store shared state
+ * information.
+ */
+
+ u32 shared_addr_size;
+ /* Size of the shared memory region. Can use gatepeterson_shared_memreq
+ * call to determine the required size.
+ */
+
+ char *name;
+ /* If using nameserver, name of this instance. The name (if not NULL) must
+ * be unique among all gatepeterson instances in the entire system.
+ */
+
+ enum gatepeterson_protect local_protection;
+ /* Local gate protection level. The default value
+ * (GATEPETERSON_PROTECT_DEFAULT) results in inheritance from the
+ * module-level default_protection. This
+ * instance setting should be set to an alternative only if a different
+ * local protection level is needed for the instance.
+ */
+ bool use_nameserver;
+ /* Whether to have this module use the nameserver or not. If the
+ * nameserver is not needed, set this configuration parameter to
+ * false. This informs gatepeterson not to pull in the nameserver
+ * module. In this case, all names passed into create and open are
+ * ignored.
+ */
+};
+
+/*
+ * Function to initialize the parameter structure
+ */
+void gatepeterson_get_config(struct gatepeterson_config *config);
+
+/*
+ * Function to initialize GP module
+ */
+int gatepeterson_setup(const struct gatepeterson_config *config);
+
+/*
+ * Function to destroy the GP module
+ */
+int gatepeterson_destroy(void);
+
+/*
+ * Function to initialize the parameter structure
+ */
+void gatepeterson_params_init(void *handle,
+ struct gatepeterson_params *params);
+
+/*
+ * Function to create an instance of GatePeterson
+ */
+void *gatepeterson_create(const struct gatepeterson_params *params);
+
+/*
+ * Function to delete an instance of GatePeterson
+ */
+int gatepeterson_delete(void **gphandle);
+
+/*
+ * Function to open a previously created instance
+ */
+int gatepeterson_open(void **gphandle,
+ struct gatepeterson_params *params);
+
+/*
+ * Function to close a previously opened instance
+ */
+int gatepeterson_close(void **gphandle);
+
+/*
+ * Function to enter the gate peterson
+ */
+u32 gatepeterson_enter(void *gphandle);
+
+/*
+ * Function to leave the gate peterson
+ */
+void gatepeterson_leave(void *gphandle, u32 flag);
+
+
+/*
+ * Returns the gatepeterson kernel object pointer
+ */
+void *gatepeterson_get_knl_handle(void **gpHandle);
+
+/*
+ * Function to return the shared memory requirement
+ */
+u32 gatepeterson_shared_memreq(const struct gatepeterson_params *params);
+
+#endif /* _GATEPETERSON_H_ */
+
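A sketch of the creator-side life cycle implied by the API above (module setup, instance create, enter/leave); the shared-memory pointer and instance name are placeholders, and passing a NULL handle to gatepeterson_params_init() for default parameters is an assumption:

/* Sketch only: shared_mem is a placeholder for real, cache-aligned
 * shared memory; return-code checks are omitted for brevity. */
#include <syslink/gatepeterson.h>

static void *sketch_gate_lifecycle(void *shared_mem)
{
        struct gatepeterson_config cfg;
        struct gatepeterson_params params;
        void *gate;
        u32 key;

        gatepeterson_get_config(&cfg);
        gatepeterson_setup(&cfg);

        gatepeterson_params_init(NULL, &params);  /* defaults assumed */
        params.name = "sketch_gate";              /* placeholder name */
        params.shared_addr = shared_mem;
        params.shared_addr_size = gatepeterson_shared_memreq(&params);

        gate = gatepeterson_create(&params);

        key = gatepeterson_enter(gate);
        /* ... critical section over the shared state ... */
        gatepeterson_leave(gate, key);

        /* Eventually: gatepeterson_delete(&gate); gatepeterson_destroy(); */
        return gate;
}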
diff --git a/arch/arm/plat-omap/include/syslink/gatepeterson_ioctl.h b/arch/arm/plat-omap/include/syslink/gatepeterson_ioctl.h
new file mode 100644
index 000000000000..ed7ab86b75bc
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/gatepeterson_ioctl.h
@@ -0,0 +1,193 @@
+/*
+ * gatepeterson_ioctl.h
+ *
+ * The Gate Peterson Algorithm for mutual exclusion of shared memory.
+ * Current implementation works for 2 processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _GATEPETERSON_IOCTL_
+#define _GATEPETERSON_IOCTL_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <ipc_ioctl.h>
+#include <gatepeterson.h>
+
+enum CMD_GATEPETERSON {
+ GATEPETERSON_GETCONFIG = GATEPETERSON_BASE_CMD,
+ GATEPETERSON_SETUP,
+ GATEPETERSON_DESTROY,
+ GATEPETERSON_PARAMS_INIT,
+ GATEPETERSON_CREATE,
+ GATEPETERSON_DELETE,
+ GATEPETERSON_OPEN,
+ GATEPETERSON_CLOSE,
+ GATEPETERSON_ENTER,
+ GATEPETERSON_LEAVE,
+ GATEPETERSON_SHAREDMEMREQ
+};
+
+/*
+ * IOCTL command IDs for gatepeterson
+ */
+
+/*
+ * Command for gatepeterson_get_config
+ */
+#define CMD_GATEPETERSON_GETCONFIG _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_GETCONFIG, \
+ struct gatepeterson_cmd_args)
+
+/*
+ * Command for gatepeterson_setup
+ */
+#define CMD_GATEPETERSON_SETUP _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_SETUP, \
+ struct gatepeterson_cmd_args)
+
+/*
+ * Command for gatepeterson_destroy
+ */
+#define CMD_GATEPETERSON_DESTROY _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_DESTROY, \
+ struct gatepeterson_cmd_args)
+
+/*
+ * Command for gatepeterson_params_init
+ */
+#define CMD_GATEPETERSON_PARAMS_INIT _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_PARAMS_INIT, \
+ struct gatepeterson_cmd_args)
+
+/*
+ * Command for gatepeterson_create
+ */
+#define CMD_GATEPETERSON_CREATE _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_CREATE, \
+ struct gatepeterson_cmd_args)
+
+/*
+ * Command for gatepeterson_delete
+ */
+#define CMD_GATEPETERSON_DELETE _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_DELETE, \
+ struct gatepeterson_cmd_args)
+/*
+ * Command for gatepeterson_open
+ */
+#define CMD_GATEPETERSON_OPEN _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_OPEN, \
+ struct gatepeterson_cmd_args)
+
+/*
+ * Command for gatepeterson_close
+ */
+#define CMD_GATEPETERSON_CLOSE _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_CLOSE, \
+ struct gatepeterson_cmd_args)
+/*
+ * Command for gatepeterson_enter
+ */
+#define CMD_GATEPETERSON_ENTER _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_ENTER, \
+ struct gatepeterson_cmd_args)
+
+/*
+ * Command for gatepeterson_leave
+ */
+#define CMD_GATEPETERSON_LEAVE _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_LEAVE, \
+ struct gatepeterson_cmd_args)
+
+/*
+ * Command for gatepeterson_shared_memreq
+ */
+#define CMD_GATEPETERSON_SHAREDMEMREQ _IOWR(IPC_IOC_MAGIC, \
+ GATEPETERSON_SHAREDMEMREQ, \
+ struct gatepeterson_cmd_args)
+
+/*
+ * Command arguments for gatepeterson
+ */
+union gatepeterson_arg {
+ struct {
+ void *handle;
+ struct gatepeterson_params *params;
+ } params_init;
+
+ struct {
+ struct gatepeterson_config *config;
+ } get_config;
+
+ struct {
+ struct gatepeterson_config *config;
+ } setup;
+
+ struct {
+ void *handle;
+ struct gatepeterson_params *params;
+ u32 name_len;
+ u32 shared_addr_srptr;
+ } create;
+
+ struct {
+ void *handle;
+ } delete;
+
+ struct {
+ void *handle;
+ struct gatepeterson_params *params;
+ u32 name_len;
+ u32 shared_addr_srptr;
+ } open;
+
+ struct {
+ void *handle;
+ } close;
+
+ struct {
+ void *handle;
+ u32 flags;
+ } enter;
+
+ struct {
+ void *handle;
+ u32 flags;
+ } leave;
+
+ struct {
+ void *handle;
+ struct gatepeterson_params *params;
+ u32 bytes;
+ } shared_memreq;
+
+};
+
+/*
+ * Command arguments for gatepeterson
+ */
+struct gatepeterson_cmd_args {
+ union gatepeterson_arg args;
+ s32 api_status;
+};
+
+/*
+ * This is the ioctl interface for the gatepeterson module
+ */
+int gatepeterson_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+#endif /* _GATEPETERSON_IOCTL_ */
+
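From user space the same operations are reached through this ioctl interface; a hedged sketch of issuing GATEPETERSON_ENTER, assuming the syslink IPC driver exposes a character device (the /dev node name and the user-space include path are placeholders) and that a zero api_status denotes success:

/* User-space sketch only: "/dev/syslinkipc" is a placeholder node name. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <gatepeterson_ioctl.h>  /* exported from this patch (path assumed) */

int sketch_gate_enter(void *knl_gate_handle)
{
        struct gatepeterson_cmd_args cmd;
        int fd = open("/dev/syslinkipc", O_RDWR);
        int ret = -1;

        if (fd < 0)
                return -1;

        memset(&cmd, 0, sizeof(cmd));
        cmd.args.enter.handle = knl_gate_handle;

        /* api_status carries the kernel-side return code back to us. */
        if (ioctl(fd, CMD_GATEPETERSON_ENTER, &cmd) == 0 && cmd.api_status == 0)
                ret = 0;

        close(fd);
        return ret;
}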
diff --git a/arch/arm/plat-omap/include/syslink/gt.h b/arch/arm/plat-omap/include/syslink/gt.h
new file mode 100644
index 000000000000..95e3feb18e7b
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/gt.h
@@ -0,0 +1,320 @@
+
+/*
+ * gt.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+/*
+ * ======== gt.h ========
+ * Purpose:
+ * There are two definitions that affect which portions of trace
+ * are actually compiled into the client: GT_TRACE and GT_ASSERT. If
+ * GT_TRACE is set to 0 then all trace statements (except for assertions)
+ * will be compiled out of the client. If GT_ASSERT is set to 0 then
+ * assertions will be compiled out of the client. GT_ASSERT can not be
+ * set to 0 unless GT_TRACE is also set to 0 (i.e. GT_TRACE == 1 implies
+ * GT_ASSERT == 1).
+ *
+ *! Revision History
+ *! ================
+ *! 02-Feb-2000 rr: Renamed this file to gtce.h. GT CLASS and trace
+ *! definitions are WinCE Specific.
+ *! 03-Jan-1997 ge Replaced "GT_" prefix to GT_Config structure members
+ *! to eliminate preprocessor confusion with other macros.
+ */
+#include <linux/types.h>
+#ifndef GT_
+#define GT_
+
+#ifndef GT_TRACE
+#define GT_TRACE 0 /* 0 = "trace compiled out"; 1 = "trace active" */
+#endif
+
+/* #include <syslink/host_os.h> */
+
+typedef s32(*Fxn)(); /* generic function type */
+
+
+#if !defined(GT_ASSERT) || GT_TRACE
+#define GT_ASSERT 1
+#endif
+
+struct GT_Config {
+ Fxn PRINTFXN;
+ Fxn PIDFXN;
+ Fxn TIDFXN;
+ Fxn ERRORFXN;
+};
+
+extern struct GT_Config *GT;
+
+struct gt_mask {
+ char *modName;
+ u8 *flags;
+} ;
+
+/*
+ * New GT Class definitions.
+ *
+ * The following are the explanations and how it could be used in the code
+ *
+ * - GT_ENTER On Entry to Functions
+ *
+ * - GT_1CLASS Display level of debugging status- Object/Automatic
+ * variables
+ * - GT_2CLASS ---- do ----
+ *
+ * - GT_3CLASS ---- do ---- + It can be used (recommended) for debug
+ * status in the ISR, IST
+ * - GT_4CLASS ---- do ----
+ *
+ * - GT_5CLASS Display entry for module init/exit functions
+ *
+ * - GT_6CLASS Warn whenever SERVICES function fails
+ *
+ * - GT_7CLASS Warn on critical failures
+ *
+ */
+
+#define GT_ENTER ((u8)0x01)
+#define GT_1CLASS ((u8)0x02)
+#define GT_2CLASS ((u8)0x04)
+#define GT_3CLASS ((u8)0x08)
+#define GT_4CLASS ((u8)0x10)
+#define GT_5CLASS ((u8)0x20)
+#define GT_6CLASS ((u8)0x40)
+#define GT_7CLASS ((u8)0x80)
+#define GT_LEAVE ((u8)0x02)
+
+#ifdef _LINT_
+
+/* LINTLIBRARY */
+
+/*
+ * ======== GT_assert ========
+ */
+/* ARGSUSED */
+void GT_assert(struct gt_mask mask, s32 expr)
+{
+}
+
+/*
+ * ======== GT_config ========
+ */
+/* ARGSUSED */
+void GT_config(struct GT_Config config)
+{
+}
+
+/*
+ * ======== GT_create ========
+ */
+/* ARGSUSED */
+void GT_create(struct gt_mask *mask, char *modName)
+{
+}
+
+/*
+ * ======== GT_curline ========
+ * Purpose:
+ * Returns the current source code line number. Is useful for performing
+ * branch testing using trace. For example,
+ *
+ * gt_1trace(curTrace, GT_1CLASS,
+ * "in module XX_mod, executing line %u\n", GT_curline());
+ */
+/* ARGSUSED */
+u16 GT_curline(void)
+{
+ return (u16)NULL;
+}
+
+/*
+ * ======== GT_exit ========
+ */
+/* ARGSUSED */
+void GT_exit(void)
+{
+}
+
+/*
+ * ======== GT_init ========
+ */
+/* ARGSUSED */
+void GT_init(void)
+{
+}
+
+/*
+ * ======== GT_query ========
+ */
+/* ARGSUSED */
+bool GT_query(struct gt_mask mask, u8 class)
+{
+ return false;
+}
+
+/*
+ * ======== GT_set ========
+ * sets trace mask according to settings
+ */
+
+/* ARGSUSED */
+void GT_set(char *settings)
+{
+}
+
+/*
+ * ======== GT_setprintf ========
+ * sets printf function
+ */
+
+/* ARGSUSED */
+void GT_setprintf(Fxn fxn)
+{
+}
+
+/* ARGSUSED */
+void gt_0trace(struct gt_mask mask, u8 class, char *format)
+{
+}
+
+/* ARGSUSED */
+void gt_1trace(struct gt_mask mask, u8 class, char *format, ...)
+{
+}
+
+/* ARGSUSED */
+void gt_2trace(struct gt_mask mask, u8 class, char *format, ...)
+{
+}
+
+/* ARGSUSED */
+void gt_3trace(struct gt_mask mask, u8 class, char *format, ...)
+{
+}
+
+/* ARGSUSED */
+void gt_4trace(struct gt_mask mask, u8 class, char *format, ...)
+{
+}
+
+/* ARGSUSED */
+void gt_5trace(struct gt_mask mask, u8 class, char *format, ...)
+{
+}
+
+/* ARGSUSED */
+void GT_6trace(struct gt_mask mask, u8 class, char *format, ...)
+{
+}
+
+#else
+
+#define GT_BOUND 26 /* 26 letters in alphabet */
+
+extern void _GT_create(struct gt_mask *mask, char *modName);
+
+#define GT_exit()
+
+extern void GT_init(void);
+extern void _GT_set(char *str);
+extern s32 _GT_trace(struct gt_mask *mask, char *format, ...);
+
+#if GT_ASSERT == 0
+
+#define GT_assert(mask, expr)
+#define GT_config(config)
+#define GT_configInit(config)
+#define GT_seterror(fxn)
+
+#else
+
+extern struct GT_Config _GT_params;
+
+#define GT_assert(mask, expr) \
+ (!(expr) ? \
+ printk(KERN_ALERT "assertion violation: %s, line %d\n", \
+ __FILE__, __LINE__), NULL : NULL)
+
+#define GT_config(config) (_GT_params = *(config))
+#define GT_configInit(config) (*(config) = _GT_params)
+#define GT_seterror(fxn) (_GT_params.ERRORFXN = (Fxn)(fxn))
+
+#endif
+
+#if GT_TRACE == 0
+
+#define GT_curline() ((u16)__LINE__)
+#define GT_create(mask, modName)
+#define GT_exit()
+#define GT_init()
+#define GT_set(settings)
+#define GT_setprintf(fxn)
+
+#define GT_query(mask, class) false
+
+#define gt_0trace(mask, class, format)
+#define gt_1trace(mask, class, format, arg1)
+#define gt_2trace(mask, class, format, arg1, arg2)
+#define gt_3trace(mask, class, format, arg1, arg2, arg3)
+#define gt_4trace(mask, class, format, arg1, arg2, arg3, arg4)
+#define gt_5trace(mask, class, format, arg1, arg2, arg3, arg4, arg5)
+#define GT_6trace(mask, class, format, arg1, arg2, arg3, arg4, arg5, arg6)
+
+#else /* GT_TRACE == 1 */
+
+#define GT_create(mask, modName) _GT_create((mask), (modName))
+#define GT_curline() ((u16)__LINE__)
+#define GT_set(settings) _GT_set(settings)
+#define GT_setprintf(fxn) (_GT_params.PRINTFXN = (Fxn)(fxn))
+
+#define GT_query(mask, class) ((*(mask).flags & (class)))
+
+#define gt_0trace(mask, class, format) \
+ ((*(mask).flags & (class)) ? \
+ _GT_trace(&(mask), (format)) : 0)
+
+#define gt_1trace(mask, class, format, arg1) \
+ ((*(mask).flags & (class)) ? \
+ _GT_trace(&(mask), (format), (arg1)) : 0)
+
+#define gt_2trace(mask, class, format, arg1, arg2) \
+ ((*(mask).flags & (class)) ? \
+ _GT_trace(&(mask), (format), (arg1), (arg2)) : 0)
+
+#define gt_3trace(mask, class, format, arg1, arg2, arg3) \
+ ((*(mask).flags & (class)) ? \
+ _GT_trace(&(mask), (format), (arg1), (arg2), (arg3)) : 0)
+
+#define gt_4trace(mask, class, format, arg1, arg2, arg3, arg4) \
+ ((*(mask).flags & (class)) ? \
+ _GT_trace(&(mask), (format), (arg1), (arg2), (arg3), (arg4)) : 0)
+
+#define gt_5trace(mask, class, format, arg1, arg2, arg3, arg4, arg5) \
+ ((*(mask).flags & (class)) ? \
+ _GT_trace(&(mask), (format), (arg1), (arg2), (arg3), (arg4), (arg5)) \
+ : 0)
+
+#define GT_6trace(mask, class, format, arg1, arg2, arg3, arg4, arg5, arg6) \
+ ((*(mask).flags & (class)) ? \
+ _GT_trace(&(mask), (format), (arg1), (arg2), (arg3), (arg4), (arg5), \
+ (arg6)) : 0)
+
+#endif /* GT_TRACE */
+
+#endif /* _LINT_ */
+
+#endif /* GT_ */
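Typical use of the trace layer above: create a per-module mask, enable trace classes with GT_set(), then emit class-filtered traces and assertions. A sketch; the module name and settings string are illustrative, and the settings syntax is defined by _GT_set(), not by this header:

/* Sketch only: "EX" and "EX=67" are placeholder module name and settings. */
#include <linux/kernel.h>
#include <syslink/gt.h>

static struct gt_mask sketch_trace_mask;

static void sketch_trace_init(void)
{
        GT_init();
        GT_create(&sketch_trace_mask, "EX");
        GT_set("EX=67");        /* hypothetical class-enable string */
}

static void sketch_traced_call(int arg)
{
        gt_1trace(sketch_trace_mask, GT_ENTER,
                  "sketch_traced_call: arg %d\n", arg);
        GT_assert(sketch_trace_mask, arg >= 0);
}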
diff --git a/arch/arm/plat-omap/include/syslink/heap.h b/arch/arm/plat-omap/include/syslink/heap.h
new file mode 100644
index 000000000000..f03a692a71ab
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/heap.h
@@ -0,0 +1,91 @@
+/*
+ * heap.h
+ *
+ * Heap module manages fixed size buffers that can be used
+ * in a multiprocessor system with shared memory.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _HEAP_H_
+#define _HEAP_H_
+
+#include <linux/types.h>
+
+/*
+ * Structure defining memory related statistics
+ */
+struct memory_stats {
+ u32 *total_size; /* Total memory size */
+ u32 *total_free_size; /* Total free memory size */
+ u32 *largest_free_size; /* Largest free memory size */
+};
+
+/*!
+ * ======== extendedstats ========
+ * Stats structure for the get_extended_stats API.
+ *
+ * max_allocated_blocks: The maximum number of blocks allocated
+ * from this heap at any single point in time during the lifetime of this
+ * heap instance.
+ *
+ * num_allocated_blocks: The total number of blocks currently
+ * allocated in this Heap instance.
+ */
+struct heap_extended_stats {
+ u32 max_allocated_blocks;
+ u32 num_allocated_blocks;
+};
+
+/*
+ * Structure defining config parameters for the heapbuf module
+ */
+struct heap_config {
+ u32 max_name_len; /* Maximum length of name */
+ bool track_max_allocs; /* Track the max number of allocated blocks */
+};
+
+/*
+ * Structure for the handle for the heap
+ */
+struct heap_object {
+ void* (*alloc) (void *handle, u32 size, u32 align);
+ int (*free) (void *handle, void *block, u32 size);
+ int (*get_stats) (void *handle, struct memory_stats *stats);
+ int (*get_extended_stats) (void *handle,
+ struct heap_extended_stats *stats);
+ void *obj;
+};
+
+/*
+ * Allocate a block
+ */
+void *heap_alloc(void *handle, u32 size, u32 align);
+
+/*
+ * Frees the block to this Heap
+ */
+int heap_free(void *handle, void *block, u32 size);
+
+/*
+ * Get heap statistics
+ */
+int heap_get_stats(void *handle, struct memory_stats *stats);
+
+/*
+ * Get heap extended statistics
+ */
+int heap_get_extended_stats(void *hphandle,
+ struct heap_extended_stats *stats);
+
+#endif /* _HEAP_H_ */
+
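The heap_object structure is a function table; heap_alloc(), heap_free() and the stats calls presumably dispatch through it. A sketch of how a concrete heap implementation might fill it in (the my_heap_* callbacks are hypothetical):

/* Sketch only: the my_heap_* callbacks are hypothetical implementations. */
#include <linux/stddef.h>
#include <linux/types.h>
#include <syslink/heap.h>

extern void *my_heap_alloc(void *handle, u32 size, u32 align);
extern int my_heap_free(void *handle, void *block, u32 size);
extern int my_heap_stats(void *handle, struct memory_stats *stats);
extern int my_heap_xstats(void *handle, struct heap_extended_stats *stats);

static struct heap_object sketch_heap = {
        .alloc              = my_heap_alloc,
        .free               = my_heap_free,
        .get_stats          = my_heap_stats,
        .get_extended_stats = my_heap_xstats,
        .obj                = NULL, /* implementation-private state */
};

/* Callers would then use the generic wrappers, e.g.: */
static void *sketch_get_block(u32 size)
{
        return heap_alloc(&sketch_heap, size, sizeof(u32));
}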
diff --git a/arch/arm/plat-omap/include/syslink/heapbuf.h b/arch/arm/plat-omap/include/syslink/heapbuf.h
new file mode 100644
index 000000000000..3667c4675d49
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/heapbuf.h
@@ -0,0 +1,152 @@
+/*
+ * heapbuf.h
+ *
+ * Heap module manages fixed size buffers that can be used
+ * in a multiprocessor system with shared memory.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _HEAPBUF_H_
+#define _HEAPBUF_H_
+
+#include <linux/types.h>
+#include <heap.h>
+#include <listmp.h>
+
+/*!
+ * @def HEAPBUF_MODULEID
+ * @brief Unique module ID.
+ */
+#define HEAPBUF_MODULEID (0x4cd5)
+
+/*
+ * Creation of Heap Buf successful.
+ */
+#define HEAPBUF_CREATED (0x05251995)
+
+/*
+ * Version.
+ */
+#define HEAPBUF_VERSION (1)
+
+/*
+ * Structure defining config parameters for the HeapBuf module.
+ */
+struct heapbuf_config {
+ u32 max_name_len; /* Maximum length of name */
+ bool use_nameserver; /* To have this module use the NameServer or not */
+ bool track_max_allocs; /* Track the maximum number of allocated blocks */
+};
+
+/*
+ * Structure defining parameters for the HeapBuf module
+ */
+struct heapbuf_params {
+ void *gate;
+ bool exact; /* Only allocate on exact match of requested size */
+ char *name; /* Name when using nameserver */
+ int resource_id; /* Resource id of the hardware linked list */
+ bool cache_flag; /* Whether to perform cache coherency calls */
+ u32 align; /* Alignment (in MAUs, power of 2) of each block */
+ u32 num_blocks; /* Number of fixed-size blocks */
+ u32 block_size; /* Size (in MAUs) of each block*/
+ void *shared_addr; /* Physical address of the shared memory */
+ u32 shared_addr_size; /* Size of shared_addr */
+ void *shared_buf; /* Physical address of the shared buffers */
+ u32 shared_buf_size; /* Size of shared_buf */
+};
+
+/*
+ * Stats structure for the getExtendedStats API.
+ */
+struct heapbuf_extended_stats {
+ u32 max_allocated_blocks;
+ /* maximum number of blocks allocated from this heap instance */
+ u32 num_allocated_blocks;
+ /* total number of blocks currently allocated from this heap instance*/
+};
+
+
+/*
+ * Function to get default configuration for the heapbuf module
+ */
+int heapbuf_get_config(struct heapbuf_config *cfgparams);
+
+/*
+ * Function to setup the heapbuf module
+ */
+int heapbuf_setup(const struct heapbuf_config *cfg);
+
+/*
+ * Function to destroy the heapbuf module
+ */
+int heapbuf_destroy(void);
+
+/* Initialize this config-params structure with supplier-specified
+ * defaults before instance creation
+ */
+void heapbuf_params_init(void *handle, struct heapbuf_params *params);
+
+/*
+ * Creates a new instance of heapbuf module
+ */
+void *heapbuf_create(const struct heapbuf_params *params);
+
+/*
+ * Deletes an instance of the heapbuf module
+ */
+int heapbuf_delete(void **handle_ptr);
+
+/*
+ * Opens a created instance of heapbuf module
+ */
+int heapbuf_open(void **handle_ptr, struct heapbuf_params *params);
+
+/*
+ * Closes previously opened/created instance of heapbuf module
+ */
+int heapbuf_close(void *handle_ptr);
+
+/*
+ * Returns the amount of shared memory required for creation
+ * of each instance
+ */
+int heapbuf_shared_memreq(const struct heapbuf_params *params, u32 *buf_size);
+
+/*
+ * Allocate a block
+ */
+void *heapbuf_alloc(void *hphandle, u32 size, u32 align);
+
+/*
+ * Frees the block to this heapbuf
+ */
+int heapbuf_free(void *hphandle, void *block, u32 size);
+
+/*
+ * Get memory statistics
+ */
+int heapbuf_get_stats(void *hphandle, struct memory_stats *stats);
+
+/*
+ * Indicate whether the heap may block during an alloc or free call
+ */
+bool heapbuf_isblocking(void *handle);
+
+/*
+ * Get extended statistics
+ */
+int heapbuf_get_extended_stats(void *hphandle,
+ struct heapbuf_extended_stats *stats);
+
+#endif /* _HEAPBUF_H_ */
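A sketch of the documented setup/create/alloc/free flow; the shared-memory pointers are caller-supplied placeholders, the sizes are illustrative, and the exact meaning of heapbuf_shared_memreq()'s return value versus its buf_size out-parameter is not spelled out by this header:

/* Sketch only: shared_addr/shared_buf are placeholders for real shared
 * memory; return-code checks are omitted for brevity. */
#include <syslink/heapbuf.h>

static void *sketch_heapbuf(void *shared_addr, void *shared_buf)
{
        struct heapbuf_config cfg;
        struct heapbuf_params params;
        void *heap;
        void *block;
        u32 buf_size = 0;

        heapbuf_get_config(&cfg);
        heapbuf_setup(&cfg);

        heapbuf_params_init(NULL, &params);     /* defaults assumed */
        params.num_blocks = 8;
        params.block_size = 256;
        params.align = 4;
        params.shared_addr = shared_addr;
        params.shared_buf = shared_buf;

        heapbuf_shared_memreq(&params, &buf_size);
        params.shared_buf_size = buf_size;
        params.shared_addr_size = 0x1000;       /* illustrative size only */

        heap = heapbuf_create(&params);

        block = heapbuf_alloc(heap, 256, 4);
        /* ... use the fixed-size block ... */
        heapbuf_free(heap, block, 256);

        /* Eventually: heapbuf_delete(&heap); heapbuf_destroy(); */
        return heap;
}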
diff --git a/arch/arm/plat-omap/include/syslink/heapbuf_ioctl.h b/arch/arm/plat-omap/include/syslink/heapbuf_ioctl.h
new file mode 100644
index 000000000000..80165dcfc436
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/heapbuf_ioctl.h
@@ -0,0 +1,215 @@
+/*
+ * heapbuf_ioctl.h
+ *
+ * Heap module manages fixed size buffers that can be used
+ * in a multiprocessor system with shared memory.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _HEAPBUF_IOCTL_
+#define _HEAPBUF_IOCTL_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <ipc_ioctl.h>
+#include <heap.h>
+#include <heapbuf.h>
+
+
+enum CMD_HEAPBUF {
+ HEAPBUF_GETCONFIG = HEAPBUF_BASE_CMD,
+ HEAPBUF_SETUP,
+ HEAPBUF_DESTROY,
+ HEAPBUF_PARAMS_INIT,
+ HEAPBUF_CREATE,
+ HEAPBUF_DELETE,
+ HEAPBUF_OPEN,
+ HEAPBUF_CLOSE,
+ HEAPBUF_ALLOC,
+ HEAPBUF_FREE,
+ HEAPBUF_SHAREDMEMREQ,
+ HEAPBUF_GETSTATS,
+ HEAPBUF_GETEXTENDEDSTATS
+};
+
+/*
+ * Command for heapbuf_get_config
+ */
+#define CMD_HEAPBUF_GETCONFIG _IOWR(IPC_IOC_MAGIC, HEAPBUF_GETCONFIG,\
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_setup
+ */
+#define CMD_HEAPBUF_SETUP _IOWR(IPC_IOC_MAGIC, HEAPBUF_SETUP, \
+ struct heapbuf_cmd_args)
+/*
+ * Command for heapbuf_destroy
+ */
+#define CMD_HEAPBUF_DESTROY _IOWR(IPC_IOC_MAGIC, HEAPBUF_DESTROY, \
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_params_init
+ */
+#define CMD_HEAPBUF_PARAMS_INIT _IOWR(IPC_IOC_MAGIC, \
+ HEAPBUF_PARAMS_INIT, \
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_create
+ */
+#define CMD_HEAPBUF_CREATE _IOWR(IPC_IOC_MAGIC, HEAPBUF_CREATE, \
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_delete
+ */
+#define CMD_HEAPBUF_DELETE _IOWR(IPC_IOC_MAGIC, HEAPBUF_DELETE, \
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_open
+ */
+#define CMD_HEAPBUF_OPEN _IOWR(IPC_IOC_MAGIC, HEAPBUF_OPEN, \
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_close
+ */
+#define CMD_HEAPBUF_CLOSE _IOWR(IPC_IOC_MAGIC, HEAPBUF_CLOSE, \
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_alloc
+ */
+#define CMD_HEAPBUF_ALLOC _IOWR(IPC_IOC_MAGIC, HEAPBUF_ALLOC, \
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_free
+ */
+#define CMD_HEAPBUF_FREE _IOWR(IPC_IOC_MAGIC, HEAPBUF_FREE, \
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_shared_memreq
+ */
+#define CMD_HEAPBUF_SHAREDMEMREQ _IOWR(IPC_IOC_MAGIC, \
+ HEAPBUF_SHAREDMEMREQ, \
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_get_stats
+ */
+#define CMD_HEAPBUF_GETSTATS _IOWR(IPC_IOC_MAGIC, \
+ HEAPBUF_GETSTATS, \
+ struct heapbuf_cmd_args)
+
+/*
+ * Command for heapbuf_get_extended_stats
+ */
+#define CMD_HEAPBUF_GETEXTENDEDSTATS _IOWR(IPC_IOC_MAGIC, \
+ HEAPBUF_GETEXTENDEDSTATS, \
+ struct heapbuf_cmd_args)
+
+
+/*
+ * Command arguments for heapbuf
+ */
+union heapbuf_arg {
+ struct {
+ void *handle;
+ struct heapbuf_params *params;
+ } params_init;
+
+ struct {
+ struct heapbuf_config *config;
+ } get_config;
+
+ struct {
+ struct heapbuf_config *config;
+ } setup;
+
+ struct {
+ void *handle;
+ struct heapbuf_params *params;
+ u32 name_len;
+ u32 *shared_addr_srptr;
+ u32 *shared_buf_srptr;
+ void *knl_gate;
+ } create;
+
+ struct {
+ void *handle;
+ } delete;
+
+ struct {
+ void *handle;
+ struct heapbuf_params *params;
+ u32 name_len;
+ u32 *shared_addr_srptr;
+ void *knl_gate;
+ } open;
+
+ struct {
+ void *handle;
+ } close;
+
+ struct {
+ void *handle;
+ u32 size;
+ u32 align;
+ u32 *block_srptr;
+ } alloc;
+
+ struct {
+ void *handle;
+ u32 *block_srptr;
+ u32 size;
+ } free;
+
+ struct {
+ void *handle;
+ struct memory_stats *stats;
+ } get_stats;
+
+ struct {
+ void *handle;
+ struct heapbuf_extended_stats *stats;
+ } get_extended_stats;
+
+ struct {
+ void *handle;
+ struct heapbuf_params *params;
+ u32 buf_size;
+ u32 bytes;
+ } shared_memreq;
+};
+
+/*
+ * Command arguments for heapbuf
+ */
+struct heapbuf_cmd_args {
+ union heapbuf_arg args;
+ s32 api_status;
+};
+
+/*
+ * This is the ioctl interface for the heapbuf module
+ */
+int heapbuf_ioctl(struct inode *pinode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+#endif /* _HEAPBUF_IOCTL_ */
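The same allocation path from user space, marshalled through heapbuf_cmd_args; the open file descriptor is assumed to refer to the syslink IPC device, zero api_status is assumed to mean success, and how the returned shared-region pointer (block_srptr) is translated back to a usable address is driver-defined:

/* User-space sketch only: fd refers to an assumed syslink IPC device node. */
#include <string.h>
#include <sys/ioctl.h>
#include <heapbuf_ioctl.h>  /* exported from this patch (path assumed) */

int sketch_heapbuf_alloc(int fd, void *knl_heap_handle, unsigned int size,
                         unsigned long *block_srptr_out)
{
        struct heapbuf_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.args.alloc.handle = knl_heap_handle;
        cmd.args.alloc.size = size;
        cmd.args.alloc.align = 4;

        if (ioctl(fd, CMD_HEAPBUF_ALLOC, &cmd) < 0 || cmd.api_status != 0)
                return -1;

        /* The driver returns the block as a shared-region pointer. */
        *block_srptr_out = (unsigned long)cmd.args.alloc.block_srptr;
        return 0;
}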
diff --git a/arch/arm/plat-omap/include/syslink/host_os.h b/arch/arm/plat-omap/include/syslink/host_os.h
new file mode 100644
index 000000000000..2e2164f314fa
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/host_os.h
@@ -0,0 +1,72 @@
+
+/*
+ * host_os.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+/*
+ * ======== host_os.h ========
+ *
+ *! Revision History
+ *! ================
+ *! 08-Mar-2004 sb Added cacheflush.h to support Dynamic Memory Mapping feature
+ *! 16-Feb-2004 sb Added headers required for consistent_alloc
+ */
+
+#ifndef _HOST_OS_H_
+#define _HOST_OS_H_
+
+#include <generated/autoconf.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+#include <linux/io.h>
+#include <linux/syscalls.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+/* ----------------------------------- Macros */
+
+#define SEEK_SET 0 /* Seek from beginning of file. */
+#define SEEK_CUR 1 /* Seek from current position. */
+#define SEEK_END 2 /* Seek from end of file. */
+
+/* TODO -- Remove, once BP defines them */
+#define INT_MAIL_MPU_IRQ 26
+#define INT_DSP_MMU_IRQ 28
+
+#endif
diff --git a/arch/arm/plat-omap/include/syslink/hw_defs.h b/arch/arm/plat-omap/include/syslink/hw_defs.h
new file mode 100644
index 000000000000..440dbb14445e
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/hw_defs.h
@@ -0,0 +1,63 @@
+/*
+ * hw_defs.h
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef __HW_DEFS_H
+#define __HW_DEFS_H
+
+#include <syslink/GlobalTypes.h>
+
+/* Page size */
+#define HW_PAGE_SIZE_4KB 0x1000
+#define HW_PAGE_SIZE_64KB 0x10000
+#define HW_PAGE_SIZE_1MB 0x100000
+#define HW_PAGE_SIZE_16MB 0x1000000
+
+/* hw_status: return type for HW API */
+typedef long hw_status;
+
+/* hw_set_clear_t: Enumerated Type used to set and clear any bit */
+enum hw_set_clear_t {
+ HW_CLEAR,
+ HW_SET
+} ;
+
+/* hw_endianism_t: Enumerated Type used to specify the endianism
+ * Do NOT change these values. They are used as bit fields. */
+enum hw_endianism_t {
+ HW_LITTLE_ENDIAN,
+ HW_BIG_ENDIAN
+
+} ;
+
+/* hw_elemnt_siz_t: Enumerated Type used to specify the element size
+ * Do NOT change these values. They are used as bit fields. */
+enum hw_elemnt_siz_t {
+ HW_ELEM_SIZE_8BIT,
+ HW_ELEM_SIZE_16BIT,
+ HW_ELEM_SIZE_32BIT,
+ HW_ELEM_SIZE_64BIT
+
+} ;
+
+/* HW_IdleMode_t: Enumerated Type used to specify Idle modes */
+enum HW_IdleMode_t {
+ HW_FORCE_IDLE,
+ HW_NO_IDLE,
+ HW_SMART_IDLE
+} ;
+
+#endif /* __HW_DEFS_H */
diff --git a/arch/arm/plat-omap/include/syslink/hw_mbox.h b/arch/arm/plat-omap/include/syslink/hw_mbox.h
new file mode 100644
index 000000000000..f50ef782e66f
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/hw_mbox.h
@@ -0,0 +1,447 @@
+/*
+ * hw_mbox.h
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef __MBOX_H
+#define __MBOX_H
+
+#include <syslink/hw_defs.h>
+
+
+#define HW_MBOX_INT_NEW_MSG 0x1
+#define HW_MBOX_INT_NOT_FULL 0x2
+#define HW_MBOX_INT_ALL 0x3
+
+/*
+ * DEFINITION: HW_MBOX_MAX_NUM_MESSAGES
+ *
+ * DESCRIPTION: Maximum number of messages that the mailbox can hold at a time.
+ *
+ *
+ */
+
+#define HW_MBOX_MAX_NUM_MESSAGES 4
+
+
+ /* width in bits of MBOX Id */
+#define HW_MBOX_ID_WIDTH 2
+
+
+/*
+ * TYPE: enum hw_mbox_id_t
+ *
+ * DESCRIPTION: Enumerated Type used to specify Mail Box Sub Module Id Number
+ *
+ *
+ */
+ enum hw_mbox_id_t {
+ HW_MBOX_ID_0,
+ HW_MBOX_ID_1,
+ HW_MBOX_ID_2,
+ HW_MBOX_ID_3,
+ HW_MBOX_ID_4,
+ HW_MBOX_ID_5
+ };
+
+/*
+ * TYPE: enum hw_mbox_userid_t
+ *
+ * DESCRIPTION: Enumerated Type used to specify Mail box User Id
+ *
+ *
+ */
+ enum hw_mbox_userid_t {
+ HW_MBOX_U0_ARM11,
+ HW_MBOX_U1_UMA,
+ HW_MBOX_U2_IVA,
+ HW_MBOX_U3_ARM11
+ };
+
+#if defined(OMAP3430)
+/*
+* TYPE: mailbox_context
+*
+* DESCRIPTION: Mailbox context settings
+*
+*
+*/
+struct mailbox_context {
+ unsigned long sysconfig;
+ unsigned long irqEnable0;
+ unsigned long irqEnable1;
+};
+#endif/* defined(OMAP3430)*/
+
+/*
+* FUNCTION : hw_mbox_msg_read
+*
+* INPUTS:
+*
+* Identifier : base_address
+* Type : const unsigned long
+* Description : Base Address of instance of Mailbox module
+*
+* Identifier : mail_box_id
+* Type : const enum hw_mbox_id_t
+* Description : Mail Box Sub module Id to read
+*
+* OUTPUTS:
+*
+* Identifier : p_read_value
+* Type : unsigned long *const
+* Description : Value read from MailBox
+*
+* RETURNS:
+*
+* Type : ReturnCode_t
+* Description : RET_OK No errors occurred
+* RET_BAD_NULL_PARAM Address/pointer Parameter was set to 0/NULL
+* RET_INVALID_ID Invalid Id used
+* RET_EMPTY Mailbox empty
+*
+* PURPOSE:
+* : this function reads an unsigned long from the specified sub-module
+* message box. If there are no messages in the mailbox,
+* an error is returned.
+*
+*/
+extern long hw_mbox_msg_read(
+ const unsigned long base_address,
+ const enum hw_mbox_id_t mail_box_id,
+ unsigned long *const p_read_value
+ );
+
+/*
+* FUNCTION : hw_mbox_msg_write
+*
+* INPUTS:
+*
+* Identifier : base_address
+* Type : const unsigned long
+* Description : Base Address of instance of Mailbox module
+*
+* Identifier : mail_box_id
+* Type : const enum hw_mbox_id_t
+* Description : Mail Box Sub module Id to write
+*
+* Identifier : write_value
+* Type : const unsigned long
+* Description : Value to write to MailBox
+*
+* RETURNS:
+*
+* Type : ReturnCode_t
+* Description : RET_OK No errors occurred
+* RET_BAD_NULL_PARAM Address/pointer Parameter was set to 0/NULL
+* RET_INVALID_ID Invalid Id used
+*
+* PURPOSE:: this function writes an unsigned long to the specified
+* sub-module message box.
+*
+*
+*/
+extern long hw_mbox_msg_write(
+ const unsigned long base_address,
+ const enum hw_mbox_id_t mail_box_id,
+ const unsigned long write_value
+ );
+
+/*
+* FUNCTION : hw_mbox_is_full
+*
+* INPUTS:
+*
+* Identifier : base_address
+* Type : const unsigned long
+* Description : Base Address of instance of Mailbox module
+*
+* Identifier : mail_box_id
+* Type : const enum hw_mbox_id_t
+* Description : Mail Box Sub module Id to check
+*
+* OUTPUTS:
+*
+* Identifier : p_is_full
+* Type : unsigned long *const
+* Description : false means mail box not Full
+* true means mailbox full.
+*
+* RETURNS:
+*
+* Type : ReturnCode_t
+* Description : RET_OK No errors occurred
+* RET_BAD_NULL_PARAM Address/pointer Parameter was set to 0/NULL
+* RET_INVALID_ID Invalid Id used
+*
+* PURPOSE: : this function reads the full status register for the mailbox.
+*
+*
+*/
+extern long hw_mbox_is_full(
+ const unsigned long base_address,
+ const enum hw_mbox_id_t mail_box_id,
+ unsigned long *const p_is_full
+ );
+
+/* -----------------------------------------------------------------
+* FUNCTION : hw_mbox_nomsg_get
+*
+* INPUTS:
+*
+* Identifier : base_address
+* Type : const unsigned long
+* Description : Base Address of instance of Mailbox module
+*
+* Identifier : mail_box_id
+* Type : const enum hw_mbox_id_t
+* Description : Mail Box Sub module Id to get num messages
+*
+* OUTPUTS:
+*
+* Identifier : p_num_msg
+* Type : unsigned long *const
+* Description : Number of messages in mailbox
+*
+* RETURNS:
+*
+* Type : ReturnCode_t
+* Description : RET_OK No errors occurred
+* RET_BAD_NULL_PARAM Address/pointer Parameter was set to 0/NULL
+* RET_INVALID_ID Invalid ID input parameter
+*
+* PURPOSE:
+* : this function gets the number of messages in the specified mailbox.
+*
+*
+*/
+extern long hw_mbox_nomsg_get(
+ const unsigned long base_address,
+ const enum hw_mbox_id_t mail_box_id,
+ unsigned long *const p_num_msg
+ );
+
+/*
+* FUNCTION : hw_mbox_event_enable
+*
+* INPUTS:
+*
+* Identifier : base_address
+* Type : const unsigned long
+* Description : Base Address of instance of Mailbox module
+*
+* Identifier : mail_box_id
+* Type : const enum hw_mbox_id_t
+* Description : Mail Box Sub module Id to enable
+*
+* Identifier : user_id
+* Type : const enum hw_mbox_userid_t
+* Description : Mail box User Id to enable
+*
+* Identifier : events
+* Type : const unsigned long
+* Description : Event(s) to enable
+*
+* RETURNS:
+*
+* Type : ReturnCode_t
+* Description : RET_OK No errors occurred
+* RET_BAD_NULL_PARAM A pointer parameter was set to NULL
+* RET_INVALID_ID Invalid Id used
+*
+* PURPOSE: : this function enables the specified IRQ.
+*
+*
+*/
+extern long hw_mbox_event_enable(
+ const unsigned long base_address,
+ const enum hw_mbox_id_t mail_box_id,
+ const enum hw_mbox_userid_t user_id,
+ const unsigned long events
+ );
+
+/*
+* FUNCTION : hw_mbox_event_disable
+*
+* INPUTS:
+*
+* Identifier : base_address
+* Type : const unsigned long
+* Description : Base Address of instance of Mailbox module
+*
+* Identifier : mail_box_id
+* Type : const enum hw_mbox_id_t
+* Description : Mail Box Sub module Id to disable
+*
+* Identifier : user_id
+* Type : const enum hw_mbox_userid_t
+* Description : Mail box User Id to disable
+*
+* Identifier : events
+* Type : const unsigned long
+* Description : Event(s) to disable
+*
+* RETURNS:
+*
+* Type : ReturnCode_t
+* Description : RET_OK No errors occurred
+* RET_BAD_NULL_PARAM A pointer parameter was set to NULL
+* RET_INVALID_ID Invalid Id used
+*
+* PURPOSE: : this function disables the specified IRQ.
+*
+*
+*/
+extern long hw_mbox_event_disable(
+ const unsigned long base_address,
+ const enum hw_mbox_id_t mail_box_id,
+ const enum hw_mbox_userid_t user_id,
+ const unsigned long events
+ );
+
+/*
+* FUNCTION : hw_mbox_event_status
+*
+* INPUTS:
+*
+* Identifier : base_address
+* Type : const unsigned long
+* Description : Base Address of instance of Mailbox module
+*
+* Identifier : mail_box_id
+* Type : const enum hw_mbox_id_t
+* Description : Mail Box Sub module Id to clear
+*
+* Identifier : user_id
+* Type : const enum hw_mbox_userid_t
+* Description : Mail box User Id to clear
+*
+* OUTPUTS:
+*
+* Identifier : p_eventStatus
+* Type : unsigned long *const
+* Description : The value in the IRQ status register
+*
+* RETURNS:
+*
+* Type : ReturnCode_t
+* Description : RET_OK No errors occurred
+* RET_BAD_NULL_PARAM Address/pointer Parameter was set to 0/NULL
+* RET_INVALID_ID Invalid Id used
+*
+* PURPOSE: : this function gets the status of the specified IRQ.
+*
+*
+*/
+extern long hw_mbox_event_status(
+ const unsigned long base_address,
+ const enum hw_mbox_id_t mail_box_id,
+ const enum hw_mbox_userid_t user_id,
+ unsigned long *const p_eventStatus
+ );
+
+/*
+* FUNCTION : hw_mbox_event_ack
+*
+* INPUTS:
+*
+* Identifier : base_address
+* Type : const unsigned long
+* Description : Base Address of instance of Mailbox module
+*
+* Identifier : mail_box_id
+* Type : const enum hw_mbox_id_t
+* Description : Mail Box Sub module Id to set
+*
+* Identifier : user_id
+* Type : const enum hw_mbox_userid_t
+* Description : Mail box User Id to set
+*
+* Identifier : event
+* Type : const unsigned long
+* Description : The event bits to acknowledge in the IRQ status register
+*
+* OUTPUTS:
+*
+* RETURNS:
+*
+* Type : ReturnCode_t
+* Description : RET_OK No errors occurred
+* RET_BAD_NULL_PARAM Address Parameter was set to 0
+* RET_INVALID_ID Invalid Id used
+*
+* PURPOSE: : This function acknowledges (clears) the status of the specified IRQ.
+*
+*
+*/
+extern long hw_mbox_event_ack(
+ const unsigned long base_address,
+ const enum hw_mbox_id_t mail_box_id,
+ const enum hw_mbox_userid_t user_id,
+ const unsigned long event
+ );
+
+#if defined(OMAP3430)
+/* ---------------------------------------------------------------
+* FUNCTION : hw_mbox_save_settings
+*
+* INPUTS:
+*
+* Identifier : base_address
+* Type : const unsigned long
+* Description : Base Address of instance of Mailbox module
+*
+*
+* RETURNS:
+*
+* Type : ReturnCode_t
+* Description : RET_OK No errors occurred
+* RET_BAD_NULL_PARAM Address/pointer Parameter was set to 0/NULL
+* RET_INVALID_ID Invalid Id used
+* RET_EMPTY Mailbox empty
+*
+* PURPOSE: : This function saves the context of the mailbox.
+*
+* ----------------------------------------------------------------
+*/
+extern long hw_mbox_save_settings(unsigned long base_address);
+
+/*
+* FUNCTION : hw_mbox_restore_settings
+*
+* INPUTS:
+*
+* Identifier : base_address
+* Type : const unsigned long
+* Description : Base Address of instance of Mailbox module
+*
+*
+* RETURNS:
+*
+* Type : ReturnCode_t
+* Description : RET_OK No errors occurred
+* RET_BAD_NULL_PARAM Address/pointer Parameter was set to 0/NULL
+* RET_INVALID_ID Invalid Id used
+* RET_EMPTY Mailbox empty
+*
+* PURPOSE: : This function restores the context of the mailbox.
+*
+*
+*/
+extern long hw_mbox_restore_settings(unsigned long base_address);
+#endif /* defined(OMAP3430) */
+
+#endif /* __MBOX_H */
+
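A quick illustration of how the mailbox event API declared above fits together. This is a hedged sketch only: the enumerator names HW_MBOX_ID_0 and HW_MBOX_U0_ARM, the event bit 0x1, and the mbox_base value are assumptions for illustration (they are not defined in this patch), and a zero return is assumed to mean RET_OK.

	/* Sketch: enable, poll and acknowledge one mailbox event.
	 * HW_MBOX_ID_0 / HW_MBOX_U0_ARM are hypothetical enumerators. */
	static void mbox_poll_example(unsigned long mbox_base)
	{
		unsigned long num_msg = 0;
		unsigned long event_status = 0;

		if (hw_mbox_event_enable(mbox_base, HW_MBOX_ID_0,
					 HW_MBOX_U0_ARM, 0x1))
			return;

		/* How many messages are pending, and which events are raised? */
		if (!hw_mbox_nomsg_get(mbox_base, HW_MBOX_ID_0, &num_msg) &&
		    num_msg) {
			hw_mbox_event_status(mbox_base, HW_MBOX_ID_0,
					     HW_MBOX_U0_ARM, &event_status);
			/* ... read the pending messages here ... */
			hw_mbox_event_ack(mbox_base, HW_MBOX_ID_0,
					  HW_MBOX_U0_ARM, event_status);
		}

		hw_mbox_event_disable(mbox_base, HW_MBOX_ID_0,
				      HW_MBOX_U0_ARM, 0x1);
	}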
diff --git a/arch/arm/plat-omap/include/syslink/hw_mmu.h b/arch/arm/plat-omap/include/syslink/hw_mmu.h
new file mode 100644
index 000000000000..3463fe1e4086
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/hw_mmu.h
@@ -0,0 +1,171 @@
+/*
+ * hw_mmu.h
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef __HW_MMU_H
+#define __HW_MMU_H
+
+#include <linux/types.h>
+
+/* Bitmasks for interrupt sources */
+#define HW_MMU_TRANSLATION_FAULT 0x2
+#define HW_MMU_ALL_INTERRUPTS 0x1F
+
+#define HW_MMU_COARSE_PAGE_SIZE 0x400
+
+/* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow
+ CPU/TLB Element size */
+enum hw_mmu_mixed_size_t {
+ HW_MMU_TLBES,
+ HW_MMU_CPUES
+};
+
+/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */
+struct hw_mmu_map_attrs_t {
+ enum hw_endianism_t endianism;
+ enum hw_elemnt_siz_t element_size;
+ enum hw_mmu_mixed_size_t mixedSize;
+};
+
+extern hw_status hw_mmu_enable(const u32 base_address);
+
+extern hw_status hw_mmu_disable(const u32 base_address);
+
+extern hw_status hw_mmu_numlocked_set(const u32 base_address,
+ u32 num_lcked_entries);
+
+extern hw_status hw_mmu_victim_numset(const u32 base_address,
+ u32 vctm_entry_num);
+
+/* For MMU faults */
+extern hw_status hw_mmu_eventack(const u32 base_address,
+ u32 irq_mask);
+
+extern hw_status hw_mmu_event_disable(const u32 base_address,
+ u32 irq_mask);
+
+extern hw_status hw_mmu_event_enable(const u32 base_address,
+ u32 irq_mask);
+
+extern hw_status hw_mmu_event_status(const u32 base_address,
+ u32 *irq_mask);
+
+extern hw_status hw_mmu_flt_adr_rd(const u32 base_address,
+ u32 *addr);
+
+/* Set the TT base address */
+extern hw_status hw_mmu_ttbset(const u32 base_address,
+ u32 ttb_phys_addr);
+
+extern hw_status hw_mmu_twl_enable(const u32 base_address);
+
+extern hw_status hw_mmu_twl_disable(const u32 base_address);
+
+extern hw_status hw_mmu_tlb_flush(const u32 base_address,
+ u32 virtual_addr,
+ u32 page_size);
+
+extern hw_status hw_mmu_tlb_flushAll(const u32 base_address);
+
+extern hw_status hw_mmu_tlb_add(const u32 base_address,
+ u32 physical_addr,
+ u32 virtual_addr,
+ u32 page_size,
+ u32 entryNum,
+ struct hw_mmu_map_attrs_t *map_attrs,
+ enum hw_set_clear_t preserve_bit,
+ enum hw_set_clear_t valid_bit);
+
+
+/* For PTEs */
+extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
+ u32 physical_addr,
+ u32 virtual_addr,
+ u32 page_size,
+ struct hw_mmu_map_attrs_t *map_attrs);
+
+extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
+ u32 pg_size,
+ u32 virtual_addr);
+
+static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
+{
+ u32 pte_addr;
+ u32 VA_31_to_20;
+
+ VA_31_to_20 = va >> (20 - 2); /* combine the >>20 index and <<2 byte offset */
+ VA_31_to_20 &= 0xFFFFFFFCUL;
+ pte_addr = l1_base + VA_31_to_20;
+
+ return pte_addr;
+}
+
+static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
+{
+ u32 pte_addr;
+
+ pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
+
+ return pte_addr;
+}
+
+static inline u32 hw_mmu_pte_coarsel1(u32 pte_val)
+{
+ u32 pteCoarse;
+
+ pteCoarse = pte_val & 0xFFFFFC00;
+
+ return pteCoarse;
+}
+
+static inline u32 hw_mmu_pte_sizel1(u32 pte_val)
+{
+ u32 pte_size = 0;
+
+ if ((pte_val & 0x3) == 0x1) {
+ /* Points to L2 PT */
+ pte_size = HW_MMU_COARSE_PAGE_SIZE;
+ }
+
+ if ((pte_val & 0x3) == 0x2) {
+ if (pte_val & (1 << 18))
+ pte_size = HW_PAGE_SIZE_16MB;
+ else
+ pte_size = HW_PAGE_SIZE_1MB;
+ }
+
+ return pte_size;
+}
+
+static inline u32 hw_mmu_pte_sizel2(u32 pte_val)
+{
+ u32 pte_size = 0;
+
+ if (pte_val & 0x2)
+ pte_size = HW_PAGE_SIZE_4KB;
+ else if (pte_val & 0x1)
+ pte_size = HW_PAGE_SIZE_64KB;
+
+ return pte_size;
+}
+
+extern hw_status hw_mmu_tlb_dump(u32 base_address, bool shw_inv_entries);
+
+extern u32 hw_mmu_pte_phyaddr(u32 pte_val, u32 pte_size);
+
+extern u32 hw_mmu_fault_dump(const u32 base_address);
+
+#endif /* __HW_MMU_H */
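The inline PTE helpers above encode the standard ARM two-level table walk; the sketch below shows how they combine to resolve one virtual address in software. It assumes the L1 and L2 tables are directly addressable through the values passed in (as with the pg_tbl_va convention used by hw_mmu_pte_set()); that assumption is for illustration only.

	static u32 mmu_pte_lookup_example(u32 l1_table_va, u32 va)
	{
		u32 l1_pte = *(u32 *)hw_mmu_pte_addr_l1(l1_table_va, va);

		/* Section (1 MB/16 MB) entries terminate at the first level. */
		if (hw_mmu_pte_sizel1(l1_pte) != HW_MMU_COARSE_PAGE_SIZE)
			return l1_pte;

		/* Coarse entry: the L1 descriptor points at an L2 table. */
		return *(u32 *)hw_mmu_pte_addr_l2(hw_mmu_pte_coarsel1(l1_pte),
						  va);
	}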
diff --git a/arch/arm/plat-omap/include/syslink/hw_ocp.h b/arch/arm/plat-omap/include/syslink/hw_ocp.h
new file mode 100644
index 000000000000..7277bbfcde33
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/hw_ocp.h
@@ -0,0 +1,60 @@
+/*
+ * hw_ocp.h
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+#ifndef __HW_OCP_H
+#define __HW_OCP_H
+
+#include <syslink/GlobalTypes.h>
+#include <syslink/hw_defs.h>
+#include <syslink/MBXRegAcM.h>
+#include <syslink/MBXAccInt.h>
+
+
+/*
+* TYPE: hal_ocp_idlemode_t
+*
+* DESCRIPTION: Enumerated Type for idle modes in OCP SYSCONFIG register
+*
+*
+*/
+enum hal_ocp_idlemode_t {
+ HW_OCP_FORCE_IDLE,
+ HW_OCP_NO_IDLE,
+ HW_OCP_SMART_IDLE
+};
+
+extern long hw_ocp_soft_reset(const unsigned long base_address);
+
+extern long hw_ocp_soft_reset_isdone(const unsigned long base_address,
+ unsigned long *reset_is_done);
+
+extern long hw_ocp_idle_modeset(const unsigned long base_address,
+ enum hal_ocp_idlemode_t idle_mode);
+
+extern long hw_ocp_idlemode_get(const unsigned long base_address,
+ enum hal_ocp_idlemode_t *idle_mode);
+
+extern long hw_ocp_autoidle_set(const unsigned long base_address,
+ enum hw_set_clear_t auto_idle);
+
+extern long hw_ocp_autoidle_get(const unsigned long base_address,
+ enum hw_set_clear_t *auto_idle);
+
+#endif /* __HW_OCP_H */
+
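The OCP helpers above follow the usual OMAP SYSCONFIG/SYSSTATUS pattern. The sketch below shows the expected call order for a soft reset; the unbounded polling loop is kept only for brevity, and a real caller would add a timeout.

	static void ocp_reset_example(unsigned long mbox_base)
	{
		unsigned long reset_done = 0;

		hw_ocp_soft_reset(mbox_base);
		while (!reset_done)
			hw_ocp_soft_reset_isdone(mbox_base, &reset_done);

		/* Allow the module to idle when the interconnect requests it. */
		hw_ocp_idle_modeset(mbox_base, HW_OCP_SMART_IDLE);
	}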
diff --git a/arch/arm/plat-omap/include/syslink/ipc_ioctl.h b/arch/arm/plat-omap/include/syslink/ipc_ioctl.h
new file mode 100644
index 000000000000..5a5078fcf3bd
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/ipc_ioctl.h
@@ -0,0 +1,92 @@
+/*
+ * ipc_ioctl.h
+ *
+ * Base file for all TI OMAP IPC ioctl's.
+ * Linux-OMAP IPC has allocated base 0xEE with a range of 0x00-0xFF.
+ * (need to get the real one from open source maintainers)
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _IPC_IOCTL_H
+#define _IPC_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+
+#define IPC_IOC_MAGIC 0xE0
+#define IPC_IOC_BASE 2
+
+enum ipc_command_count {
+ MULTIPROC_CMD_NOS = 4,
+ NAMESERVER_CMD_NOS = 13,
+ HEAPBUF_CMD_NOS = 13,
+ SHAREDREGION_CMD_NOS = 10,
+ GATEPETERSON_CMD_NOS = 11,
+ LISTMP_SHAREDMEMORY_CMD_NOS = 18,
+ MESSAGEQ_CMD_NOS = 17,
+ MESSAGEQ_TRANSPORTSHM_CMD_NOS = 9,
+ NAMESERVERREMOTENOTIFY_CMD_NOS = 8,
+ SYSMGR_CMD_NOS = 5,
+ SYSMEMMGR_CMD_NOS = 6
+};
+
+enum ipc_command_ranges {
+ MULTIPROC_BASE_CMD = IPC_IOC_BASE,
+ MULTIPROC_END_CMD = (MULTIPROC_BASE_CMD + \
+ MULTIPROC_CMD_NOS - 1),
+
+ NAMESERVER_BASE_CMD = 10,
+ NAMESERVER_END_CMD = (NAMESERVER_BASE_CMD + \
+ NAMESERVER_CMD_NOS - 1),
+
+ HEAPBUF_BASE_CMD = 30,
+ HEAPBUF_END_CMD = (HEAPBUF_BASE_CMD + \
+ HEAPBUF_CMD_NOS - 1),
+
+ SHAREDREGION_BASE_CMD = 50,
+ SHAREDREGION_END_CMD = (SHAREDREGION_BASE_CMD + \
+ SHAREDREGION_CMD_NOS - 1),
+
+ GATEPETERSON_BASE_CMD = 70,
+ GATEPETERSON_END_CMD = (GATEPETERSON_BASE_CMD + \
+ GATEPETERSON_CMD_NOS - 1),
+
+ LISTMP_SHAREDMEMORY_BASE_CMD = 90,
+ LISTMP_SHAREDMEMORY_END_CMD = (LISTMP_SHAREDMEMORY_BASE_CMD + \
+ LISTMP_SHAREDMEMORY_CMD_NOS - 1),
+
+ MESSAGEQ_BASE_CMD = 110,
+ MESSAGEQ_END_CMD = (MESSAGEQ_BASE_CMD + \
+ MESSAGEQ_CMD_NOS - 1),
+
+ MESSAGEQ_TRANSPORTSHM_BASE_CMD = 130,
+ MESSAGEQ_TRANSPORTSHM_END_CMD = (MESSAGEQ_TRANSPORTSHM_BASE_CMD + \
+ MESSAGEQ_TRANSPORTSHM_CMD_NOS - 1),
+
+ NAMESERVERREMOTENOTIFY_BASE_CMD = 160,
+ NAMESERVERREMOTENOTIFY_END_CMD = (NAMESERVERREMOTENOTIFY_BASE_CMD + \
+ NAMESERVERREMOTENOTIFY_CMD_NOS - 1),
+
+ SYSMGR_BASE_CMD = 170,
+ SYSMGR_END_CMD = (SYSMGR_BASE_CMD + \
+ SYSMGR_CMD_NOS - 1),
+
+ SYSMEMMGR_BASE_CMD = 180,
+ SYSMEMMGR_END_CMD = (SYSMEMMGR_BASE_CMD + \
+ SYSMEMMGR_CMD_NOS - 1)
+};
+
+int ipc_ioc_router(u32 cmd, ulong arg);
+
+#endif /* _IPC_IOCTL_H */
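The command-range enums above exist so that ipc_ioc_router() can forward a single ioctl number to the module that owns it. The routine below is only a sketch of that dispatch; the "forward to ..." steps stand in for per-module handlers that are not declared in this header.

	static int ipc_route_sketch(u32 cmd, ulong arg)
	{
		u32 nr = _IOC_NR(cmd);

		/* arg would be passed through to the chosen handler. */
		if (nr >= MESSAGEQ_BASE_CMD && nr <= MESSAGEQ_END_CMD)
			return 0;	/* forward to the MessageQ handler */
		if (nr >= LISTMP_SHAREDMEMORY_BASE_CMD &&
		    nr <= LISTMP_SHAREDMEMORY_END_CMD)
			return 0;	/* forward to the ListMP handler */

		return -ENOTTY;		/* unknown command range */
	}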
diff --git a/arch/arm/plat-omap/include/syslink/listmp.h b/arch/arm/plat-omap/include/syslink/listmp.h
new file mode 100644
index 000000000000..536f1804da91
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/listmp.h
@@ -0,0 +1,267 @@
+/*
+ * listmp.h
+ *
+ * The listmp module defines the shared memory doubly linked list.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _LISTMP_H_
+#define _LISTMP_H_
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Utilities headers */
+#include <linux/list.h>
+/*#include <heap.h>*/
+
+/* =============================================================================
+ * All success and failure codes for the module
+ * =============================================================================
+ */
+/*!
+ * @def LISTMP_MODULEID
+ * @brief Unique module ID.
+ */
+#define LISTMP_MODULEID (0xa413)
+
+/*!
+ * @def LISTMP_ERRORCODEBASE
+ * @brief Error code base for ListMP.
+ */
+#define LISTMP_ERRORCODEBASE (LISTMP_MODULEID << 12)
+
+/*!
+ * @def LISTMP_MAKE_FAILURE
+ * @brief Macro to make error code.
+ */
+#define LISTMP_MAKE_FAILURE(x) ((int)(0x80000000 \
+ + (LISTMP_ERRORCODEBASE + (x))))
+
+/*!
+ * @def LISTMP_MAKE_SUCCESS
+ * @brief Macro to make success code.
+ */
+#define LISTMP_MAKE_SUCCESS(x) (LISTMP_ERRORCODEBASE + (x))
+
+/*!
+ * @def LISTMP_E_INVALIDARG
+ * @brief Argument passed to a function is invalid.
+ */
+#define LISTMP_E_INVALIDARG LISTMP_MAKE_FAILURE(1)
+
+/*!
+ * @def LISTMP_E_MEMORY
+ * @brief Memory allocation failed.
+ */
+#define LISTMP_E_MEMORY LISTMP_MAKE_FAILURE(2)
+
+/*!
+ * @def LISTMP_E_BUSY
+ * @brief The name is already registered.
+ */
+#define LISTMP_E_BUSY LISTMP_MAKE_FAILURE(3)
+
+/*!
+ * @def LISTMP_E_FAIL
+ * @brief Generic failure.
+ */
+#define LISTMP_E_FAIL LISTMP_MAKE_FAILURE(4)
+
+/*!
+ * @def LISTMP_E_NOTFOUND
+ * @brief Name not found in the nameserver.
+ */
+#define LISTMP_E_NOTFOUND LISTMP_MAKE_FAILURE(5)
+
+/*!
+ * @def LISTMP_E_INVALIDSTATE
+ * @brief Module is not initialized.
+ */
+#define LISTMP_E_INVALIDSTATE LISTMP_MAKE_FAILURE(6)
+
+/*!
+ * @def LISTMP_E_OSFAILURE
+ * @brief Failure in OS call.
+ */
+#define LISTMP_E_OSFAILURE LISTMP_MAKE_FAILURE(7)
+
+/*!
+ * @def LISTMP_SUCCESS
+ * @brief Operation successful.
+ */
+#define LISTMP_SUCCESS LISTMP_MAKE_SUCCESS(0)
+
+/*!
+ * @def LISTMP_S_ALREADYSETUP
+ * @brief The LISTMP module has already been setup in this process.
+ */
+#define LISTMP_S_ALREADYSETUP LISTMP_MAKE_SUCCESS(1)
+
+/* =============================================================================
+ * Macros and types
+ * =============================================================================
+ */
+/*!
+ * @brief Enum defining types of list for the ListMP module.
+ */
+enum listmp_type {
+ listmp_type_SHARED = 0,
+ /*!< List in shared memory */
+ listmp_type_FAST = 1
+ /*!< Hardware Queue */
+};
+
+/*!
+ * @brief Structure defining config parameters for the ListMP module.
+ */
+struct listmp_config {
+ u32 max_name_len;
+ /*!< Maximum length of name */
+ bool use_name_server;
+ /*!< Whether to have this module use the NameServer or not. If the
+ * NameServer is not needed, set this configuration parameter to false.
+ * This informs ListMPSharedMemory not to pull in the NameServer module.
+ * In this case, all names passed into create and open are ignored.
+ */
+};
+
+/*!
+ * @brief Structure defining list element for the ListMP.
+ */
+struct listmp_elem {
+ volatile struct listmp_elem *next;
+ volatile struct listmp_elem *prev;
+};
+
+/*!
+ * @brief Structure defining config parameters for the ListMP instances.
+ */
+struct listmp_params {
+ bool cache_flag;
+ /*!< Set to 1 by the open() call. No one else should touch this! */
+ struct mutex *gate;
+ /*!< Lock used for critical region management of the list */
+ void *shared_addr;
+ /*!< shared memory address */
+ u32 shared_addr_size;
+ /*!< shared memory size */
+ char *name;
+ /*!< Name of the object */
+ int resource_id;
+ /*!<
+ * resourceId Specifies the resource id number.
+ * Parameter is used only when type is set to Fast List
+ */
+ enum listmp_type list_type;
+ /*!< Type of list */
+};
+
+
+/* =============================================================================
+ * Forward declarations
+ * =============================================================================
+ */
+/*!
+ * @brief Structure defining config parameters for the ListMPSharedMemory.
+ */
+struct listmp_object {
+ bool (*empty)(void *listmp_handle);
+ /* Function to check if list is empty */
+ void *(*get_head)(void *listmp_handle);
+ /* Function to get head element from list */
+ void *(*get_tail)(void *listmp_handle);
+ /* Function to get tail element from list */
+ int (*put_head)(void *listmp_handle, struct listmp_elem *elem);
+ /* Function to put head element into list */
+ int (*put_tail)(void *listmp_handle, struct listmp_elem *elem);
+ /* Function to put tail element into list */
+ int (*insert)(void *listmp_handle, struct listmp_elem *elem,
+ struct listmp_elem *curElem);
+ /* Function to insert element into list */
+ int (*remove)(void *listmp_handle, struct listmp_elem *elem);
+ /* Function to remove element from list */
+ void *(*next)(void *listmp_handle, struct listmp_elem *elem);
+ /* Function to traverse to next element in list */
+ void *(*prev)(void *listmp_handle, struct listmp_elem *elem);
+ /* Function to traverse to prev element in list */
+ void *obj;
+ /*!< Handle to ListMP */
+ enum listmp_type list_type;
+ /* Type of list */
+};
+
+/*
+ * Function initializes listmp parameters
+ */
+void listmp_params_init(void *listmp_handle,
+ struct listmp_params *params);
+
+/*
+ * Function to get shared memory requirement for the module
+ */
+int listmp_shared_memreq(struct listmp_params *params);
+
+/* =============================================================================
+ * Functions to create instance of a list
+ * =============================================================================
+ */
+/* Function to create an instance of ListMP */
+void *listmp_create(struct listmp_params *params);
+
+/* Function to delete an instance of ListMP */
+int listmp_delete(void **listmp_handle_ptr);
+
+/* =============================================================================
+ * Functions to open/close handle to list instance
+ * =============================================================================
+ */
+/* Function to open a previously created instance */
+int listmp_open(void **listmp_handle_ptr, struct listmp_params *params);
+
+/* Function to close a previously opened instance */
+int listmp_close(void *listmp_handle);
+
+/* =============================================================================
+ * Function pointer types for list operations
+ * =============================================================================
+ */
+/* Function to check if list is empty */
+bool listmp_empty(void *listmp_handle);
+
+/* Function to get head element from list */
+void *listmp_get_head(void *listmp_handle);
+
+/* Function to get tail element from list */
+void *listmp_get_tail(void *listmp_handle);
+
+/* Function to put head element into list */
+int listmp_put_head(void *listmp_handle, struct listmp_elem *elem);
+
+/* Function to put tail element into list */
+int listmp_put_tail(void *listmp_handle, struct listmp_elem *elem);
+
+/* Function to insert element into list */
+int listmp_insert(void *listmp_handle, struct listmp_elem *elem,
+ struct listmp_elem *curElem);
+
+/* Function to traverse to remove element from list */
+int listmp_remove(void *listmp_handle, struct listmp_elem *elem);
+
+/* Function to traverse to next element in list */
+void *listmp_next(void *listmp_handle, struct listmp_elem *elem);
+
+/* Function to traverse to prev element in list */
+void *listmp_prev(void *listmp_handle, struct listmp_elem *elem);
+
+#endif /* _LISTMP_H_ */
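A minimal end-to-end use of the ListMP API declared above, assuming the caller already owns a suitable shared-memory window. The list name, the element embedded at the start of the payload struct, and passing NULL as the handle to listmp_params_init() are illustrative assumptions; real elements would normally be allocated from the shared region itself.

	struct example_node {
		struct listmp_elem elem;	/* assumed to lead the struct */
		u32 payload;
	};

	static void listmp_example(void *shared_addr, u32 shared_size)
	{
		struct listmp_params params;
		struct example_node node = { .payload = 42 };
		void *list;

		listmp_params_init(NULL, &params);	/* defaults first */
		params.shared_addr = shared_addr;
		params.shared_addr_size = shared_size;
		params.name = "example_list";
		params.list_type = listmp_type_SHARED;

		list = listmp_create(&params);
		if (!list)
			return;

		listmp_put_tail(list, &node.elem);
		if (!listmp_empty(list))
			(void)listmp_get_head(list);

		listmp_delete(&list);
	}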
diff --git a/arch/arm/plat-omap/include/syslink/listmp_sharedmemory.h b/arch/arm/plat-omap/include/syslink/listmp_sharedmemory.h
new file mode 100644
index 000000000000..c6fd1629728e
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/listmp_sharedmemory.h
@@ -0,0 +1,289 @@
+/*
+ * listmp_sharedmemory.h
+ *
+ * The listmp_sharedmemory is a double linked-list based module designed to be
+ * used in a multi-processor environment. It is designed to provide a means
+ * of communication between different processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _LISTMP_SHAREDMEMORY_H_
+#define _LISTMP_SHAREDMEMORY_H_
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Utilities headers */
+
+/* Other headers */
+#include <listmp.h>
+
+/* =============================================================================
+ * All success and failure codes for the module
+ * =============================================================================
+ */
+/*!
+ * @def LISTMPSHAREDMEMORY_MODULEID
+ * @brief Unique module ID.
+ */
+#define LISTMPSHAREDMEMORY_MODULEID (0xDD3C)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_ERRORCODEBASE
+ * @brief Error code base for ListMPSharedMemory.
+ */
+#define LISTMPSHAREDMEMORY_ERRORCODEBASE \
+ (LISTMPSHAREDMEMORY_MODULEID << 12)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_MAKE_FAILURE
+ * @brief Macro to make error code.
+ */
+#define LISTMPSHAREDMEMORY_MAKE_FAILURE(x) \
+ ((int) (0x80000000 \
+ + (LISTMPSHAREDMEMORY_ERRORCODEBASE \
+ + (x))))
+
+/*!
+ * @def LISTMPSHAREDMEMORY_MAKE_SUCCESS
+ * @brief Macro to make success code.
+ */
+#define LISTMPSHAREDMEMORY_MAKE_SUCCESS(x) \
+ (LISTMPSHAREDMEMORY_ERRORCODEBASE + (x))
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_INVALIDARG
+ * @brief Argument passed to a function is invalid.
+ */
+#define LISTMPSHAREDMEMORY_E_INVALIDARG \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(1)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_MEMORY
+ * @brief Memory allocation failed.
+ */
+#define LISTMPSHAREDMEMORY_E_MEMORY \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(2)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_FAIL
+ * @brief Generic failure.
+ */
+#define LISTMPSHAREDMEMORY_E_FAIL \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(3)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_INVALIDSTATE
+ * @brief Module is not initialized.
+ */
+#define LISTMPSHAREDMEMORY_E_INVALIDSTATE \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(4)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_OSFAILURE
+ * @brief Failure in OS call.
+ */
+#define LISTMPSHAREDMEMORY_E_OSFAILURE \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(5)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_NOTOWNER
+ * @brief Instance is not created on this processor.
+ */
+#define LISTMPSHAREDMEMORY_E_NOTOWNER \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(6)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_REMOTEACTIVE
+ * @brief Remote opener of the instance has not closed the instance.
+ */
+#define LISTMPSHAREDMEMORY_E_REMOTEACTIVE \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(7)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_INUSE
+ * @brief Indicates that the instance is in use.
+ */
+#define LISTMPSHAREDMEMORY_E_INUSE \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(8)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_NOTFOUND
+ * @brief Name not found in the nameserver.
+ */
+#define LISTMPSHAREDMEMORY_E_NOTFOUND \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(9)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_NOTCREATED
+ * @brief Instance is not created yet
+ */
+#define LISTMPSHAREDMEMORY_E_NOTCREATED \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(10)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_VERSION
+ * @brief Version mismatch error.
+ */
+#define LISTMPSHAREDMEMORY_E_VERSION \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(11)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_E_BUSY
+ * @brief The name is already registered.
+ */
+#define LISTMPSHAREDMEMORY_E_BUSY \
+ LISTMPSHAREDMEMORY_MAKE_FAILURE(12)
+
+
+/*!
+ * @def LISTMPSHAREDMEMORY_SUCCESS
+ * @brief Operation successful.
+ */
+#define LISTMPSHAREDMEMORY_SUCCESS \
+ LISTMPSHAREDMEMORY_MAKE_SUCCESS(0)
+
+/*!
+ * @def LISTMPSHAREDMEMORY_S_ALREADYSETUP
+ * @brief The LISTMPSHAREDMEMORY module has already been setup in this
+ * process.
+ */
+#define LISTMPSHAREDMEMORY_S_ALREADYSETUP \
+ LISTMPSHAREDMEMORY_MAKE_SUCCESS(1)
+
+/*!
+ * @def listmp_sharedmemory_CREATED
+ * @brief Creation of list successful.
+*/
+#define LISTMP_SHAREDMEMORY_CREATED (0x12181964)
+
+/*!
+ * @def LISTMP_SHAREDMEMORY_VERSION
+ * @brief Version.
+ */
+#define LISTMP_SHAREDMEMORY_VERSION (1)
+
+/* =============================================================================
+ * Structure definitions
+ * =============================================================================
+ */
+/*!
+ * @brief Structure defining config parameters for the ListMP instances.
+ */
+#define listmp_sharedmemory_params struct listmp_params
+
+
+/*! @brief Forward declaration of structure defining object for the
+ * ListMPSharedMemory.
+ */
+/*!
+ * @brief Object for the ListMPSharedMemory Handle
+ */
+#define listmp_sharedmemory_object struct listmp_object
+
+/*!
+ * @brief Handle for the ListMPSharedMemory
+ */
+#define listmp_sharedmemory_handle void *
+
+/* =============================================================================
+ * Functions to create the module
+ * =============================================================================
+ */
+/* Function to get configuration parameters to setup
+ * the ListMPSharedMemory module.
+ */
+int listmp_sharedmemory_get_config(struct listmp_config *cfgParams);
+
+/* Function to setup the ListMPSharedMemory module. */
+int listmp_sharedmemory_setup(struct listmp_config *config);
+
+/* Function to destroy the ListMPSharedMemory module. */
+int listmp_sharedmemory_destroy(void);
+
+/* =============================================================================
+ * Functions to create instance of a list
+ * =============================================================================
+ */
+/* Function to create an instance of ListMP */
+listmp_sharedmemory_handle listmp_sharedmemory_create
+ (listmp_sharedmemory_params *params);
+
+/* Function to delete an instance of ListMP */
+int listmp_sharedmemory_delete(listmp_sharedmemory_handle *listMPHandlePtr);
+
+/* =============================================================================
+ * Functions to open/close handle to list instance
+ * =============================================================================
+ */
+/*
+ * Initialize this config-params structure with supplier-specified
+ * defaults before instance creation.
+ */
+void listmp_sharedmemory_params_init(listmp_sharedmemory_handle handle,
+ listmp_sharedmemory_params *params);
+
+/* Function to open a previously created instance */
+int listmp_sharedmemory_open(listmp_sharedmemory_handle *listMpHandlePtr,
+ listmp_sharedmemory_params *params);
+
+/* Function to close a previously opened instance */
+int listmp_sharedmemory_close(listmp_sharedmemory_handle listMPHandle);
+
+/* =============================================================================
+ * Functions for list operations
+ * =============================================================================
+ */
+/* Function to check if list is empty */
+bool listmp_sharedmemory_empty(listmp_sharedmemory_handle listMPHandle);
+
+/* Function to get head element from list */
+void *listmp_sharedmemory_get_head(listmp_sharedmemory_handle listMPHandle);
+
+/* Function to get tail element from list */
+void *listmp_sharedmemory_get_tail(listmp_sharedmemory_handle listMPHandle);
+
+/* Function to put head element into list */
+int listmp_sharedmemory_put_head(listmp_sharedmemory_handle listMPHandle,
+ struct listmp_elem *elem);
+
+/* Function to put tail element into list */
+int listmp_sharedmemory_put_tail(listmp_sharedmemory_handle listMPHandle,
+ struct listmp_elem *elem);
+
+/* Function to insert element into list */
+int listmp_sharedmemory_insert(listmp_sharedmemory_handle listMPHandle,
+ struct listmp_elem *elem,
+ struct listmp_elem *curElem);
+
+/* Function to traverse to remove element from list */
+int listmp_sharedmemory_remove(listmp_sharedmemory_handle listMPHandle,
+ struct listmp_elem *elem);
+
+/* Function to traverse to next element in list */
+void *listmp_sharedmemory_next(listmp_sharedmemory_handle listMPHandle,
+ struct listmp_elem *elem);
+
+/* Function to traverse to prev element in list */
+void *listmp_sharedmemory_prev(listmp_sharedmemory_handle listMPHandle,
+ struct listmp_elem *elem);
+
+/* =============================================================================
+ * Functions for shared memory requirements
+ * =============================================================================
+ */
+/* Amount of shared memory required for creation of each instance. */
+int listmp_sharedmemory_shared_memreq(
+ listmp_sharedmemory_params *params);
+
+#endif /* _LISTMP_SHAREDMEMORY_H_ */
diff --git a/arch/arm/plat-omap/include/syslink/listmp_sharedmemory_ioctl.h b/arch/arm/plat-omap/include/syslink/listmp_sharedmemory_ioctl.h
new file mode 100644
index 000000000000..42fc4d6edd24
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/listmp_sharedmemory_ioctl.h
@@ -0,0 +1,258 @@
+/*
+ * listmp_sharedmemory_ioctl.h
+ *
+ * Definitions of listmp_sharedmemory driver types and structures.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _LISTMP_SHAREDMEMORY_IOCTL_H_
+#define _LISTMP_SHAREDMEMORY_IOCTL_H_
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Syslink headers */
+#include <ipc_ioctl.h>
+#include <listmp_sharedmemory.h>
+#include <sharedregion.h>
+
+/* =============================================================================
+ * Macros and types
+ * =============================================================================
+ */
+/* Base command ID for listmp_sharedmemory */
+#define LISTMP_SHAREDMEMORY_IOC_MAGIC IPC_IOC_MAGIC
+enum listmp_sharedmemory_drv_cmd {
+ LISTMP_SHAREDMEMORY_GETCONFIG = LISTMP_SHAREDMEMORY_BASE_CMD,
+ LISTMP_SHAREDMEMORY_SETUP,
+ LISTMP_SHAREDMEMORY_DESTROY,
+ LISTMP_SHAREDMEMORY_PARAMS_INIT,
+ LISTMP_SHAREDMEMORY_CREATE,
+ LISTMP_SHAREDMEMORY_DELETE,
+ LISTMP_SHAREDMEMORY_OPEN,
+ LISTMP_SHAREDMEMORY_CLOSE,
+ LISTMP_SHAREDMEMORY_ISEMPTY,
+ LISTMP_SHAREDMEMORY_GETHEAD,
+ LISTMP_SHAREDMEMORY_GETTAIL,
+ LISTMP_SHAREDMEMORY_PUTHEAD,
+ LISTMP_SHAREDMEMORY_PUTTAIL,
+ LISTMP_SHAREDMEMORY_INSERT,
+ LISTMP_SHAREDMEMORY_REMOVE,
+ LISTMP_SHAREDMEMORY_NEXT,
+ LISTMP_SHAREDMEMORY_PREV,
+ LISTMP_SHAREDMEMORY_SHAREDMEMREQ
+};
+
+/* ----------------------------------------------------------------------------
+ * IOCTL command IDs for listmp_sharedmemory
+ * ----------------------------------------------------------------------------
+ */
+/* Command for listmp_sharedmemory_get_config */
+#define CMD_LISTMP_SHAREDMEMORY_GETCONFIG \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_GETCONFIG, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_setup */
+#define CMD_LISTMP_SHAREDMEMORY_SETUP \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_SETUP, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_destroy */
+#define CMD_LISTMP_SHAREDMEMORY_DESTROY \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_DESTROY, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_params_init */
+#define CMD_LISTMP_SHAREDMEMORY_PARAMS_INIT \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_PARAMS_INIT, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_create */
+#define CMD_LISTMP_SHAREDMEMORY_CREATE \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_CREATE, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_delete */
+#define CMD_LISTMP_SHAREDMEMORY_DELETE \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_DELETE, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_open */
+#define CMD_LISTMP_SHAREDMEMORY_OPEN \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_OPEN, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_close */
+#define CMD_LISTMP_SHAREDMEMORY_CLOSE \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_CLOSE, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_is_empty */
+#define CMD_LISTMP_SHAREDMEMORY_ISEMPTY \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_ISEMPTY, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_get_head */
+#define CMD_LISTMP_SHAREDMEMORY_GETHEAD \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_GETHEAD, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_get_tail */
+#define CMD_LISTMP_SHAREDMEMORY_GETTAIL \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_GETTAIL, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_put_head */
+#define CMD_LISTMP_SHAREDMEMORY_PUTHEAD \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_PUTHEAD, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_put_tail */
+#define CMD_LISTMP_SHAREDMEMORY_PUTTAIL \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_PUTTAIL, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_insert */
+#define CMD_LISTMP_SHAREDMEMORY_INSERT \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_INSERT, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_remove */
+#define CMD_LISTMP_SHAREDMEMORY_REMOVE \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_REMOVE, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_next */
+#define CMD_LISTMP_SHAREDMEMORY_NEXT \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_NEXT, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_prev */
+#define CMD_LISTMP_SHAREDMEMORY_PREV \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_PREV, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command for listmp_sharedmemory_shared_memreq */
+#define CMD_LISTMP_SHAREDMEMORY_SHAREDMEMREQ \
+ _IOWR(LISTMP_SHAREDMEMORY_IOC_MAGIC, LISTMP_SHAREDMEMORY_SHAREDMEMREQ, \
+ struct listmp_sharedmemory_cmd_args)
+
+/* Command arguments for listmp_sharedmemory */
+struct listmp_sharedmemory_cmd_args {
+ union {
+ struct {
+ void *listmp_handle;
+ struct listmp_params *params;
+ } params_init;
+
+ struct {
+ struct listmp_config *config;
+ } get_config;
+
+ struct {
+ struct listmp_config *config;
+ } setup;
+
+ struct {
+ void *listmp_handle;
+ struct listmp_params *params;
+ u32 name_len;
+ u32 shared_addr_srptr;
+ void *knl_gate;
+ } create;
+
+ struct {
+ void *listmp_handle;
+ } delete_listmp;
+
+ struct {
+ void *listmp_handle;
+ struct listmp_params *params;
+ u32 name_len;
+ u32 shared_addr_srptr;
+ void *knl_gate;
+ } open;
+
+ struct {
+ void *listmp_handle;
+ } close;
+
+ struct {
+ void *listmp_handle;
+ bool is_empty;
+ } is_empty;
+
+ struct {
+ void *listmp_handle;
+ u32 *elem_srptr;
+ } get_head;
+
+ struct {
+ void *listmp_handle;
+ u32 *elem_srptr;
+ } get_tail;
+
+ struct {
+ void *listmp_handle;
+ u32 *elem_srptr;
+ } put_head;
+
+ struct {
+ void *listmp_handle;
+ u32 *elem_srptr;
+ } put_tail;
+
+ struct {
+ void *listmp_handle;
+ u32 *new_elem_srptr;
+ u32 *cur_elem_srptr;
+ } insert;
+
+ struct {
+ void *listmp_handle;
+ u32 *elem_srptr;
+ } remove;
+
+ struct {
+ void *listmp_handle;
+ u32 *elem_srptr;
+ u32 *next_elem_srptr;
+ } next;
+
+ struct {
+ void *listmp_handle;
+ u32 *elem_srptr;
+ u32 *prev_elem_srptr;
+ } prev;
+
+ struct {
+ void *listmp_handle;
+ struct listmp_params *params;
+ u32 bytes;
+ } shared_memreq;
+ } args;
+
+ int api_status;
+};
+
+/* ----------------------------------------------------------------------------
+ * IOCTL functions for listmp_sharedmemory module
+ * ----------------------------------------------------------------------------
+ */
+/*
+ * ioctl interface function for listmp_sharedmemory
+ */
+int listmp_sharedmemory_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+#endif /* _LISTMP_SHAREDMEMORY_IOCTL_H_ */
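The cmd_args layout above is shared between user space and the driver: inputs and outputs travel through the union member for the chosen command, while api_status carries the syslink-level result separately from the ioctl return value. Below is a hedged user-space style sketch; the file descriptor, the device node it refers to, and the memset/ioctl includes are assumed rather than defined in this patch.

	static int listmp_getconfig_sketch(int fd)
	{
		struct listmp_config cfg;
		struct listmp_sharedmemory_cmd_args args;

		memset(&args, 0, sizeof(args));
		args.args.get_config.config = &cfg;

		if (ioctl(fd, CMD_LISTMP_SHAREDMEMORY_GETCONFIG, &args) < 0)
			return -1;		/* driver-level failure */

		return args.api_status;		/* syslink-level result */
	}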
diff --git a/arch/arm/plat-omap/include/syslink/messageq.h b/arch/arm/plat-omap/include/syslink/messageq.h
new file mode 100644
index 000000000000..90ba6f070048
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/messageq.h
@@ -0,0 +1,464 @@
+/*
+ * messageq.h
+ *
+ * The MessageQ module supports the structured sending and receiving of
+ * variable length messages. This module can be used for homogeneous or
+ * heterogeneous multi-processor messaging.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _MESSAGEQ_H_
+#define _MESSAGEQ_H_
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Utilities headers */
+#include <linux/list.h>
+
+/* Syslink headers */
+#include <listmp.h>
+#include <messageq_transportshm.h>
+
+
+/*!
+ * @def MESSAGEQ_MODULEID
+ * @brief Unique module ID.
+ */
+#define MESSAGEQ_MODULEID (0xded2)
+
+/* =============================================================================
+ * All success and failure codes for the module
+ * =============================================================================
+ */
+
+/*!
+ * @def MESSAGEQ_STATUSCODEBASE
+ * @brief Error code base for MessageQ.
+ */
+#define MESSAGEQ_STATUSCODEBASE (MESSAGEQ_MODULEID << 12)
+
+/*!
+ * @def MESSAGEQ_MAKE_FAILURE
+ * @brief Macro to make error code.
+ */
+#define MESSAGEQ_MAKE_FAILURE(x) ((int) (0x80000000 + \
+ (MESSAGEQ_STATUSCODEBASE + \
+ (x))))
+
+/*!
+ * @def MESSAGEQ_MAKE_SUCCESS
+ * @brief Macro to make success code.
+ */
+#define MESSAGEQ_MAKE_SUCCESS(x) (MESSAGEQ_STATUSCODEBASE + (x))
+
+/*!
+ * @def MESSAGEQ_E_INVALIDARG
+ * @brief Argument passed to a function is invalid.
+ */
+#define MESSAGEQ_E_INVALIDARG MESSAGEQ_MAKE_FAILURE(1)
+
+/*!
+ * @def MESSAGEQ_E_MEMORY
+ * @brief Memory allocation failed.
+ */
+#define MESSAGEQ_E_MEMORY MESSAGEQ_MAKE_FAILURE(2)
+
+/*!
+ * @def MESSAGEQ_E_BUSY
+ * @brief The name is already registered.
+ */
+#define MESSAGEQ_E_BUSY MESSAGEQ_MAKE_FAILURE(3)
+
+/*!
+ * @def MESSAGEQ_E_FAIL
+ * @brief Generic failure.
+ */
+#define MESSAGEQ_E_FAIL MESSAGEQ_MAKE_FAILURE(4)
+
+/*!
+ * @def MESSAGEQ_E_NOTFOUND
+ * @brief Name not found in the nameserver.
+ */
+#define MESSAGEQ_E_NOTFOUND MESSAGEQ_MAKE_FAILURE(5)
+
+/*!
+ * @def MESSAGEQ_E_INVALIDSTATE
+ * @brief Module is not initialized.
+ */
+#define MESSAGEQ_E_INVALIDSTATE MESSAGEQ_MAKE_FAILURE(6)
+
+/*!
+ * @def MESSAGEQ_E_NOTONWER
+ * @brief Instance is not created on this processor.
+ */
+#define MESSAGEQ_E_NOTONWER MESSAGEQ_MAKE_FAILURE(7)
+
+/*!
+ * @def MESSAGEQ_E_REMOTEACTIVE
+ * @brief Remote opener of the instance has not closed the instance.
+ */
+#define MESSAGEQ_E_REMOTEACTIVE MESSAGEQ_MAKE_FAILURE(8)
+
+/*!
+ * @def MESSAGEQ_E_INUSE
+ * @brief Indicates that the instance is in use.
+ */
+#define MESSAGEQ_E_INUSE MESSAGEQ_MAKE_FAILURE(9)
+
+/*!
+ * @def MESSAGEQ_E_INVALIDCONTEXT
+ * @brief Indicates that the API was called with a wrong handle
+ */
+#define MESSAGEQ_E_INVALIDCONTEXT MESSAGEQ_MAKE_FAILURE(10)
+
+/*!
+ * @def MESSAGEQ_E_INVALIDMSG
+ * @brief Indicates that an invalid msg has been specified
+ *
+ */
+#define MESSAGEQ_E_INVALIDMSG MESSAGEQ_MAKE_FAILURE(11)
+
+/*!
+ * @def MESSAGEQ_E_INVALIDHEAPID
+ * @brief Indicates that an invalid heap has been specified
+ */
+#define MESSAGEQ_E_INVALIDHEAPID MESSAGEQ_MAKE_FAILURE(12)
+
+/*!
+ * @def MESSAGEQ_E_INVALIDPROCID
+ * @brief Indicates that an invalid proc id has been specified
+ */
+#define MESSAGEQ_E_INVALIDPROCID MESSAGEQ_MAKE_FAILURE(13)
+
+/*!
+ * @def MESSAGEQ_E_MAXREACHED
+ * @brief Indicates that all message queues are taken
+ */
+#define MESSAGEQ_E_MAXREACHED MESSAGEQ_MAKE_FAILURE(14)
+
+/*!
+ * @def MESSAGEQ_E_UNREGISTERHEAPID
+ * @brief Indicates that heap id has not been registered
+ */
+#define MESSAGEQ_E_UNREGISTERHEAPID MESSAGEQ_MAKE_FAILURE(15)
+
+/*!
+ * @def MESSAGEQ_E_CANNOTFREESTATICMSG
+ * @brief Indicates that static msg cannot be freed
+ */
+#define MESSAGEQ_E_CANNOTFREESTATICMSG MESSAGEQ_MAKE_FAILURE(16)
+
+/*!
+ * @def MESSAGEQ_E_HEAPIDINVALID
+ * @brief Indicates that the heap id is invalid
+ */
+#define MESSAGEQ_E_HEAPIDINVALID MESSAGEQ_MAKE_FAILURE(17)
+
+/*!
+ * @def MESSAGEQ_E_PROCIDINVALID
+ * @brief Indicates that the proc id is invalid
+ */
+#define MESSAGEQ_E_PROCIDINVALID MESSAGEQ_MAKE_FAILURE(18)
+
+/*!
+ * @def MESSAGEQ_E_OSFAILURE
+ * @brief Failure in OS call.
+ */
+#define MESSAGEQ_E_OSFAILURE MESSAGEQ_MAKE_FAILURE(19)
+
+/*!
+ * @def MESSAGEQ_E_ALREADYEXISTS
+ * @brief Specified entity already exists
+ */
+#define MESSAGEQ_E_ALREADYEXISTS MESSAGEQ_MAKE_FAILURE(20)
+
+/*!
+ * @def MESSAGEQ_E_TIMEOUT
+ * @brief Timeout while attempting to get a message
+ */
+#define MESSAGEQ_E_TIMEOUT MESSAGEQ_MAKE_FAILURE(21)
+
+/*!
+ * @def MESSAGEQ_SUCCESS
+ * @brief Operation successful.
+ */
+#define MESSAGEQ_SUCCESS MESSAGEQ_MAKE_SUCCESS(0)
+
+/*!
+ * @def MESSAGEQ_S_ALREADYSETUP
+ * @brief The MESSAGEQ module has already been setup in this process.
+ */
+#define MESSAGEQ_S_ALREADYSETUP MESSAGEQ_MAKE_SUCCESS(1)
+
+
+/* =============================================================================
+ * Macros and types
+ * =============================================================================
+ */
+/*!
+ * @brief Message header version value
+ */
+#define MESSAGEQ_HEADERVERSION 0x2000u
+
+/*!
+ * @brief Mask to extract priority setting
+ */
+#define MESSAGEQ_PRIORITYMASK 0x3u
+
+/*!
+ * @brief Mask to extract priority setting
+ */
+#define MESSAGEQ_TRANSPORTPRIORITYMASK 0x01u
+
+/*!
+ * Mask to extract version setting
+ */
+#define MESSAGEQ_VERSIONMASK 0xE000
+
+/*!
+ * Used as the timeout value to specify wait forever
+ */
+#define MESSAGEQ_FOREVER (~((u32) 0))
+
+/*!
+ * Invalid message id
+ */
+#define MESSAGEQ_INVALIDMSGID 0xFFFF
+
+/*!
+ * Invalid message queue
+ */
+#define MESSAGEQ_INVALIDMESSAGEQ 0xFFFF
+
+/*!
+ * Indicates that, if the maximum number of message queues has already been
+ * created, growth is allowed so that additional message queues can be created.
+ */
+#define MESSAGEQ_ALLOWGROWTH (~((u32) 0))
+
+/*!
+ * Number of types of priority queues for each transport
+ */
+#define MESSAGEQ_NUM_PRIORITY_QUEUES 2
+
+
+/* =============================================================================
+ * Structures & Enums
+ * =============================================================================
+ */
+/*!
+ * Message priority
+ */
+enum messageq_priority {
+ MESSAGEQ_NORMALPRI = 0,
+ /*!< Normal priority message */
+ MESSAGEQ_HIGHPRI = 1,
+ /*!< High priority message */
+ MESSAGEQ_RESERVEDPRI = 2,
+ /*!< Reserved value for message priority */
+ MESSAGEQ_URGENTPRI = 3
+ /*!< Urgent priority message */
+};
+
+/*! Structure which defines the first field in every message */
+struct msgheader {
+ u32 reserved0;
+ /*!< Reserved field */
+ u32 reserved1;
+ /*!< Reserved field */
+ u32 msg_size;
+ /*!< Size of the message (including header) */
+ u16 flags;
+ /*!< Flags */
+ u16 msg_id;
+ /*!< Message ID */
+ u16 dst_id;
+ /*!< Destination queue ID */
+ u16 dst_proc;
+ /*!< Destination processor ID */
+ u16 reply_id;
+ /*!< Reply queue ID */
+ u16 reply_proc;
+ /*!< Reply processor ID */
+ u16 src_proc;
+ /*!< Source processor ID */
+ u16 heap_id;
+ /*!< Heap ID the message was allocated from */
+ u32 reserved;
+ /*!< Reserved field */
+};
+/*! Structure which defines the first field in every message */
+#define messageq_msg struct msgheader *
+/*typedef struct msgheader *messageq_msg;*/
+
+
+/*!
+ * @brief Structure defining config parameters for the MessageQ Buf module.
+ */
+struct messageq_config {
+ u16 num_heaps;
+ /*!
+ * Number of heapIds in the system
+ *
+ * This allows MessageQ to pre-allocate the heaps table.
+ * The heaps table is used when registering heaps.
+ *
+ * The default is 1 since generally all systems need at least
+ * one heap.
+ *
+ * There is no default heap, so unless the system is only using
+ * staticMsgInit, the application must register a heap.
+ */
+
+ u32 max_runtime_entries;
+ /*!
+ * Maximum number of MessageQs that can be dynamically created
+ */
+
+ struct mutex *name_table_gate;
+ /*!
+ * Gate used to make the name table thread safe. If NULL is passed, gate
+ * will be created internally.
+ */
+
+ u32 max_name_len;
+ /*!
+ * Maximum length for Message queue names
+ */
+};
+
+struct messageq_params {
+ u32 reserved;
+ /*!< No parameters required currently. Reserved field. */
+ u32 max_name_len;
+ /*!< Maximum length for Message queue names */
+};
+
+/* =============================================================================
+ * APIs
+ * =============================================================================
+ */
+/* Functions to get the configuration for messageq setup */
+void messageq_get_config(struct messageq_config *cfg);
+
+/* Function to setup the MessageQ module. */
+int messageq_setup(const struct messageq_config *cfg);
+
+/* Function to destroy the MessageQ module. */
+int messageq_destroy(void);
+
+/* Initialize this config-params structure with supplier-specified
+ * defaults before instance creation.
+ */
+void messageq_params_init(void *messageq_handle,
+ struct messageq_params *params);
+
+/* Create a message queue */
+void *messageq_create(char *name, const struct messageq_params *params);
+
+/* Deletes an instance of the MessageQ module. */
+int messageq_delete(void **messageq_handleptr);
+
+/* Allocates a message from the heap */
+messageq_msg messageq_alloc(u16 heapId, u32 size);
+
+/* Frees a message back to the heap */
+int messageq_free(messageq_msg msg);
+
+/* Open a message queue */
+int messageq_open(char *name, u32 *queue_id);
+
+/* Close an opened message queue handle */
+void messageq_close(u32 *queue_id);
+
+/* Initializes a message not obtained from MessageQ_alloc */
+void messageq_static_msg_init(messageq_msg msg, u32 size);
+
+/* Place a message onto a message queue */
+int messageq_put(u32 queueId, messageq_msg msg);
+
+/* Gets a message for a message queue and blocks if the queue is empty */
+int messageq_get(void *messageq_handle, messageq_msg *msg,
+ u32 timeout);
+
+/* Register a heap with MessageQ */
+int messageq_register_heap(void *heap_handle, u16 heap_id);
+
+/* Unregister a heap with MessageQ */
+int messageq_unregister_heap(u16 heapId);
+
+/* Returns the number of messages in a message queue */
+int messageq_count(void *messageq_handle);
+
+/* Set the reply queue of the message. */
+void messageq_set_reply_queue(void *messageq_handle, messageq_msg msg);
+
+/* Get the queue ID of the message queue. */
+u32 messageq_get_queue_id(void *messageq_handle);
+
+/* Get the processor ID of the message queue. */
+u16 messageq_get_proc_id(void *messageq_handle);
+
+/*
+ * Functions to set Message properties
+ */
+/*!
+ * @brief Returns the MessageQ_Queue handle of the destination
+ * message queue for the specified message.
+ */
+u32 messageq_get_dst_queue(messageq_msg msg);
+
+/*!
+ * @brief Returns the message ID of the specified message.
+ */
+u16 messageq_get_msg_id(messageq_msg msg);
+
+/*!
+ * @brief Returns the size of the specified message.
+ */
+u32 messageq_get_msg_size(messageq_msg msg);
+
+/*!
+ * @brief Gets the message priority of a message
+ */
+u32 messageq_get_msg_pri(messageq_msg msg);
+
+/*!
+ * @brief Returns the MessageQ_Queue handle of the destination
+ * message queue for the specified message.
+ */
+u32 messageq_get_reply_queue(messageq_msg msg);
+
+/*!
+ * @brief Sets the message ID in the specified message.
+ */
+void messageq_set_msg_id(messageq_msg msg, u16 msg_id);
+/*!
+ * @brief Sets the message priority in the specified message.
+ */
+void messageq_set_msg_pri(messageq_msg msg, u32 priority);
+
+/* =============================================================================
+ * APIs called internally by MessageQ transports
+ * =============================================================================
+ */
+/* Register a transport with MessageQ */
+int messageq_register_transport(void *messageq_transportshm_handle,
+ u16 proc_id, u32 priority);
+
+/* Unregister a transport with MessageQ */
+int messageq_unregister_transport(u16 proc_id, u32 priority);
+
+
+#endif /* _MESSAGEQ_H_ */
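Tying the MessageQ calls above together, the sketch below shows one queue used as both sender and receiver on a single processor. The queue name and the use of heap id 0 are assumptions; a heap must already have been registered with messageq_register_heap() for messageq_alloc() to succeed, and passing NULL to messageq_params_init() is illustrative only.

	static void messageq_roundtrip_sketch(void)
	{
		struct messageq_params params;
		messageq_msg msg;
		void *queue;
		u32 queue_id;

		messageq_params_init(NULL, &params);
		queue = messageq_create("example_q", &params);
		if (!queue)
			return;

		/* Sender side: look the queue up by name and post a message. */
		if (messageq_open("example_q", &queue_id) == MESSAGEQ_SUCCESS) {
			msg = messageq_alloc(0, sizeof(struct msgheader));
			if (msg) {
				messageq_set_msg_id(msg, 1);
				messageq_put(queue_id, msg);
			}
			messageq_close(&queue_id);
		}

		/* Receiver side: block until the message arrives, then free it. */
		if (messageq_get(queue, &msg, MESSAGEQ_FOREVER) ==
				MESSAGEQ_SUCCESS)
			messageq_free(msg);

		messageq_delete(&queue);
	}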
diff --git a/arch/arm/plat-omap/include/syslink/messageq_ioctl.h b/arch/arm/plat-omap/include/syslink/messageq_ioctl.h
new file mode 100644
index 000000000000..2f8424c21d7e
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/messageq_ioctl.h
@@ -0,0 +1,237 @@
+/*
+ * messageq_ioctl.h
+ *
+ * Definitions of messageq driver types and structures.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _MESSAGEQ_IOCTL_H_
+#define _MESSAGEQ_IOCTL_H_
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Syslink headers */
+#include <ipc_ioctl.h>
+#include <messageq.h>
+#include <heap.h>
+#include <sharedregion.h>
+
+/* =============================================================================
+ * Macros and types
+ * =============================================================================
+ */
+#define MESSAGEQ_IOC_MAGIC IPC_IOC_MAGIC
+enum messageq_drv_cmd {
+ MESSAGEQ_GETCONFIG = MESSAGEQ_BASE_CMD,
+ MESSAGEQ_SETUP,
+ MESSAGEQ_DESTROY,
+ MESSAGEQ_PARAMS_INIT,
+ MESSAGEQ_CREATE,
+ MESSAGEQ_DELETE,
+ MESSAGEQ_OPEN,
+ MESSAGEQ_CLOSE,
+ MESSAGEQ_COUNT,
+ MESSAGEQ_ALLOC,
+ MESSAGEQ_FREE,
+ MESSAGEQ_PUT,
+ MESSAGEQ_REGISTERHEAP,
+ MESSAGEQ_UNREGISTERHEAP,
+ MESSAGEQ_REGISTERTRANSPORT,
+ MESSAGEQ_UNREGISTERTRANSPORT,
+ MESSAGEQ_GET
+};
+
+/* ----------------------------------------------------------------------------
+ * IOCTL command IDs for messageq
+ * ----------------------------------------------------------------------------
+ */
+/* Base command ID for messageq */
+#define MESSAGEQ_BASE_CMD 0x0
+
+/* Command for messageq_get_config */
+#define CMD_MESSAGEQ_GETCONFIG \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_GETCONFIG, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_setup */
+#define CMD_MESSAGEQ_SETUP \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_SETUP, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_destroy */
+#define CMD_MESSAGEQ_DESTROY \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_DESTROY, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_params_init */
+#define CMD_MESSAGEQ_PARAMS_INIT \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_PARAMS_INIT, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_create */
+#define CMD_MESSAGEQ_CREATE \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_CREATE, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_delete */
+#define CMD_MESSAGEQ_DELETE \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_DELETE, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_open */
+#define CMD_MESSAGEQ_OPEN \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_OPEN, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_close */
+#define CMD_MESSAGEQ_CLOSE \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_CLOSE, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_count */
+#define CMD_MESSAGEQ_COUNT \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_COUNT, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_alloc */
+#define CMD_MESSAGEQ_ALLOC \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_ALLOC, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_free */
+#define CMD_MESSAGEQ_FREE \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_FREE, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_put */
+#define CMD_MESSAGEQ_PUT \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_PUT, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_register_heap */
+#define CMD_MESSAGEQ_REGISTERHEAP \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_REGISTERHEAP, \
+ struct messageq_cmd_args)
+
+/* Command for messageq_unregister_heap */
+#define CMD_MESSAGEQ_UNREGISTERHEAP \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_UNREGISTERHEAP, \
+ struct messageq_cmd_args)
+
+
+/* Command for messageq_register_transport */
+#define CMD_MESSAGEQ_REGISTERTRANSPORT \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_REGISTERTRANSPORT, \
+ struct messageq_cmd_args)
+
+
+/* Command for messageq_unregister_transport */
+#define CMD_MESSAGEQ_UNREGISTERTRANSPORT \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_UNREGISTERTRANSPORT, \
+ struct messageq_cmd_args)
+
+
+/* Command for messageq_get */
+#define CMD_MESSAGEQ_GET \
+ _IOWR(MESSAGEQ_IOC_MAGIC, MESSAGEQ_GET, \
+ struct messageq_cmd_args)
+
+/* Command arguments for messageq */
+struct messageq_cmd_args {
+ union {
+ struct {
+ void *messageq_handle;
+ struct messageq_params *params;
+ } params_init;
+
+ struct {
+ struct messageq_config *config;
+ } get_config;
+
+ struct {
+ struct messageq_config *config;
+ } setup;
+
+ struct {
+ void *messageq_handle;
+ char *name;
+ struct messageq_params *params;
+ u32 name_len;
+ u32 queue_id;
+ } create;
+
+ struct {
+ void *messageq_handle;
+ } delete_messageq;
+
+ struct {
+ char *name;
+ u32 queue_id;
+ u32 name_len;
+ } open;
+
+ struct {
+ u32 queue_id;
+ } close;
+
+ struct {
+ void *messageq_handle;
+ u32 timeout;
+ u32 *msg_srptr;
+ } get;
+
+ struct {
+ void *messageq_handle;
+ int count;
+ } count;
+
+ struct {
+ u16 heap_id;
+ u32 size;
+ u32 *msg_srptr;
+ } alloc;
+
+ struct {
+ u32 *msg_srptr;
+ } free;
+
+ struct {
+ u32 queue_id;
+ u32 *msg_srptr;
+ } put;
+
+ struct {
+ void *heap_handle;
+ u16 heap_id;
+ } register_heap;
+
+ struct {
+ u16 heap_id;
+ } unregister_heap;
+ } args;
+
+ int api_status;
+};
+
+/* ----------------------------------------------------------------------------
+ * IOCTL functions for messageq module
+ * ----------------------------------------------------------------------------
+ */
+/*
+ * ioctl interface function for messageq
+ */
+int messageq_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+#endif /* _MESSAGEQ_IOCTL_H_ */
diff --git a/arch/arm/plat-omap/include/syslink/messageq_transportshm.h b/arch/arm/plat-omap/include/syslink/messageq_transportshm.h
new file mode 100644
index 000000000000..e2b55f6b0b4d
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/messageq_transportshm.h
@@ -0,0 +1,283 @@
+/*
+ * messageq_transportshm.h
+ *
+ * MessageQ shared memory based physical transport for
+ * communication with the remote processor.
+ *
+ * This file contains the declarations of types and APIs as part
+ * of interface of the MessageQ shared memory transport.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _MESSAGEQ_TRANSPORTSHM_H_
+#define _MESSAGEQ_TRANSPORTSHM_H_
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Utilities headers */
+#include <linux/list.h>
+
+/* =============================================================================
+ * All success and failure codes for the module
+ * =============================================================================
+ */
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_MODULEID
+ * @brief Unique module ID.
+ */
+#define MESSAGEQ_TRANSPORTSHM_MODULEID (0x0a7a)
+
+/* =============================================================================
+ * All success and failure codes for the module
+ * =============================================================================
+ */
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_STATUSCODEBASE
+ * @brief Error code base for MessageQ.
+ */
+#define MESSAGEQ_TRANSPORTSHM_STATUSCODEBASE \
+ (MESSAGEQ_TRANSPORTSHM_MODULEID << 12)
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE
+ * @brief Macro to make error code.
+ */
+#define MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE(x) ((int) (0x80000000 \
+ + (MESSAGEQ_TRANSPORTSHM_STATUSCODEBASE \
+ + (x))))
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_MAKE_SUCCESS
+ * @brief Macro to make success code.
+ */
+#define MESSAGEQ_TRANSPORTSHM_MAKE_SUCCESS(x) \
+ (MESSAGEQ_TRANSPORTSHM_STATUSCODEBASE + (x))
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_E_INVALIDARG
+ * @brief Argument passed to a function is invalid.
+ */
+#define MESSAGEQ_TRANSPORTSHM_E_INVALIDARG \
+ MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE(1)
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_E_INVALIDSIZE
+ * @brief Invalid shared address size
+ */
+#define MESSAGEQ_TRANSPORTSHM_E_INVALIDSIZE \
+ MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE(2)
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_E_INVALIDSTATE
+ * @brief Module is not initialized.
+ */
+#define MESSAGEQ_TRANSPORTSHM_E_INVALIDSTATE \
+ MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE(3)
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_E_BADVERSION
+ * @brief Versions don't match
+ */
+#define MESSAGEQ_TRANSPORTSHM_E_BADVERSION \
+ MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE(4)
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_E_FAIL
+ * @brief General Failure
+ */
+#define MESSAGEQ_TRANSPORTSHM_E_FAIL \
+ MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE(5)
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_E_MEMORY
+ * @brief Memory allocation failed
+ */
+#define MESSAGEQ_TRANSPORTSHM_E_MEMORY \
+ MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE(6)
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_E_OSFAILURE
+ * @brief Failure in OS call.
+ */
+#define MESSAGEQ_TRANSPORTSHM_E_OSFAILURE \
+ MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE(7)
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_E_HANDLE
+ * @brief Invalid handle specified.
+ */
+#define MESSAGEQ_TRANSPORTSHM_E_HANDLE \
+ MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE(8)
+
+/*!
+ * @def MESSAGEQTRANSPORTSHM_E_NOTSUPPORTED
+ * @brief The specified operation is not supported.
+ */
+#define MESSAGEQTRANSPORTSHM_E_NOTSUPPORTED \
+ MESSAGEQ_TRANSPORTSHM_MAKE_FAILURE(9)
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_SUCCESS
+ * @brief Operation successful.
+ */
+#define MESSAGEQ_TRANSPORTSHM_SUCCESS \
+ MESSAGEQ_TRANSPORTSHM_MAKE_SUCCESS(0)
+
+/*!
+ * @def MESSAGEQ_TRANSPORTSHM_S_ALREADYSETUP
+ * @brief The MESSAGEQ_TRANSPORTSHM module has
+ * already been setup in this process.
+ */
+#define MESSAGEQ_TRANSPORTSHM_S_ALREADYSETUP \
+ MESSAGEQ_TRANSPORTSHM_MAKE_SUCCESS(1)
+
+
+/* =============================================================================
+ * Structures & Enums
+ * =============================================================================
+ */
+
+/*!
+ * @brief Structure defining the reason for error function being called
+ */
+enum MessageQTransportShm_Reason {
+ MessageQTransportShm_Reason_FAILEDPUT,
+ /*!< Failed to send the message. */
+ MessageQTransportShm_Reason_INTERNALERR,
+ /*!< An internal error occurred in the transport */
+ MessageQTransportShm_Reason_PHYSICALERR,
+ /*!< An error occurred in the physical link in the transport */
+ MessageQTransportShm_Reason_FAILEDALLOC
+ /*!< Failed to allocate a message. */
+};
+
+/*!
+ * @brief transport error callback function.
+ *
+ * First parameter: Why the error function is being called.
+ *
+ * Second parameter: Handle of transport that had the error. NULL denotes
+ * that it is a system error, not a specific transport.
+ *
+ * Third parameter: Pointer to the message. This is only valid for
+ * #MessageQTransportShm_Reason_FAILEDPUT.
+ *
+ * Fourth parameter: Transport specific information. Refer to individual
+ * transports for more details.
+ */
+
+/*!
+ * @brief Module configuration structure.
+ */
+struct messageq_transportshm_config {
+ void (*err_fxn)(enum MessageQTransportShm_Reason reason,
+ void *handle,
+ void *msg,
+ u32 info);
+ /*!< Asynchronous error function for the transport module */
+};
+
+/*!
+ * @brief Structure defining config parameters for the MessageQ transport
+ * instances.
+ */
+struct messageq_transportshm_params {
+ u32 priority;
+ /*!< Priority of messages supported by this transport */
+ void *gate;
+ /*!< Gate used for critical region management of the shared memory */
+ void *shared_addr;
+ /*!< Address of the shared memory. The creator must supply the shared
+ * memory that this instance will use to maintain shared state information.
+ */
+ u32 shared_addr_size;
+ /*!< Size of shared region provided. */
+ u32 notify_event_no;
+ /*!< Notify event number to be used by the transport */
+ void *notify_driver;
+ /*!< Notify driver to be used by the transport */
+};
+
+/*!
+ * @brief Structure defining Transport status values
+ */
+enum messageq_transportshm_status {
+ messageq_transportshm_status_INIT,
+ /*!< MessageQ transport Shm instance has not completed
+ * initialization. */
+ messageq_transportshm_status_UP,
+ /*!< MessageQ transport Shm instance is up and functional. */
+ messageq_transportshm_status_DOWN,
+ /*!< MessageQ transport Shm instance is down and not functional. */
+ messageq_transportshm_status_RESETTING
+ /*!< MessageQ transport Shm instance was up at one point and is in
+ * process of resetting.
+ */
+};
+
+
+/* =============================================================================
+ * APIs called by applications
+ * =============================================================================
+ */
+/* Function to get the default configuration for the MessageQTransportShm
+ * module. */
+void messageq_transportshm_get_config(struct messageq_transportshm_config *cfg);
+
+/* Function to setup the MessageQTransportShm module. */
+int messageq_transportshm_setup(const struct messageq_transportshm_config *cfg);
+
+/* Function to destroy the MessageQTransportShm module. */
+int messageq_transportshm_destroy(void);
+
+/* Get the default parameters for the MessageQTransportShm instance. */
+void messageq_transportshm_params_init(void *mqtshm_handle,
+ struct messageq_transportshm_params *params);
+
+/* Create an instance of the MessageQTransportShm. */
+void *messageq_transportshm_create(u16 proc_id,
+ const struct messageq_transportshm_params *params);
+
+/* Delete an instance of the MessageQTransportShm. */
+int messageq_transportshm_delete(void **mqtshm_handleptr);
+
+/* Get the shared memory requirements for the MessageQTransportShm. */
+u32 messageq_transportshm_shared_mem_req(const
+ struct messageq_transportshm_params *params);
+
+/* Set the asynchronous error function for the transport module */
+void messageq_transportshm_set_err_fxn(
+ void (*err_fxn)(
+ enum MessageQTransportShm_Reason reason,
+ void *handle,
+ void *msg,
+ u32 info));
+
+
+/* =============================================================================
+ * APIs called internally by MessageQ module.
+ * =============================================================================
+ */
+/* Put msg to remote list */
+int messageq_transportshm_put(void *mqtshm_handle, void *msg);
+
+/* Control Function */
+int messageq_transportshm_control(void *mqtshm_handle, u32 cmd,
+ u32 *cmd_arg);
+
+/* Get current status of the MessageQTransportShm */
+enum messageq_transportshm_status messageq_transportshm_get_status(
+ void *mqtshm_handle);
+
+#endif /* _MESSAGEQ_TRANSPORTSHM_H_ */
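The status macros above make failure codes negative when held in an int (for example MESSAGEQ_TRANSPORTSHM_E_INVALIDARG works out to 0x80000000 + (0x0a7a << 12) + 1 = 0x80a7a001), so callers can test for errors with a simple "< 0" check. A minimal kernel-side sketch of the bring-up order implied by these declarations (get_config, setup, params_init, create) might look like the following; the shared-memory address and size, the notify driver handle, event number 10 and the NULL handle passed to params_init to request defaults are all assumptions.

#include <syslink/messageq_transportshm.h>

static void *example_transport_create(u16 remote_proc_id, void *shared_va,
					u32 shared_size, void *notify_drv)
{
	struct messageq_transportshm_config cfg;
	struct messageq_transportshm_params params;

	messageq_transportshm_get_config(&cfg);
	if (messageq_transportshm_setup(&cfg) < 0)
		return NULL;

	/* Passing NULL to request default parameters is an assumption. */
	messageq_transportshm_params_init(NULL, &params);
	params.shared_addr = shared_va;
	params.shared_addr_size = shared_size;
	params.notify_event_no = 10;	/* event number is an assumption */
	params.notify_driver = notify_drv;

	if (messageq_transportshm_shared_mem_req(&params) > shared_size)
		return NULL;	/* caller supplied too little shared memory */

	return messageq_transportshm_create(remote_proc_id, &params);
}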
diff --git a/arch/arm/plat-omap/include/syslink/messageq_transportshm_ioctl.h b/arch/arm/plat-omap/include/syslink/messageq_transportshm_ioctl.h
new file mode 100644
index 000000000000..3790dc0d22e8
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/messageq_transportshm_ioctl.h
@@ -0,0 +1,160 @@
+/*
+ * messageq_transportshm_ioctl.h
+ *
+ * Definitions of messageq_transportshm driver types and structures.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _MESSAGEQ_TRANSPORTSHM_IOCTL_H_
+#define _MESSAGEQ_TRANSPORTSHM_IOCTL_H_
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Syslink headers */
+#include <ipc_ioctl.h>
+#include <messageq_transportshm.h>
+#include <sharedregion.h>
+
+
+/* =============================================================================
+ * Macros and types
+ * =============================================================================
+ */
+/* Ioctl magic number for messageq_transportshm */
+#define MESSAGEQ_TRANSPORTSHM_IOC_MAGIC IPC_IOC_MAGIC
+
+/* Base command ID for messageq_transportshm */
+#define MESSAGEQ_TRANSPORTSHM_BASE_CMD 0x0
+
+enum messageq_transportshm_drv_cmd {
+ MESSAGEQ_TRANSPORTSHM_GETCONFIG = MESSAGEQ_TRANSPORTSHM_BASE_CMD,
+ MESSAGEQ_TRANSPORTSHM_SETUP,
+ MESSAGEQ_TRANSPORTSHM_DESTROY,
+ MESSAGEQ_TRANSPORTSHM_PARAMS_INIT,
+ MESSAGEQ_TRANSPORTSHM_CREATE,
+ MESSAGEQ_TRANSPORTSHM_DELETE,
+ MESSAGEQ_TRANSPORTSHM_PUT,
+ MESSAGEQ_TRANSPORTSHM_SHAREDMEMREQ,
+ MESSAGEQ_TRANSPORTSHM_GETSTATUS
+};
+
+/* ----------------------------------------------------------------------------
+ * IOCTL command IDs for messageq_transportshm
+ * ----------------------------------------------------------------------------
+ */
+
+/* Command for messageq_transportshm_get_config */
+#define CMD_MESSAGEQ_TRANSPORTSHM_GETCONFIG \
+ _IOWR(MESSAGEQ_TRANSPORTSHM_IOC_MAGIC, \
+ MESSAGEQ_TRANSPORTSHM_GETCONFIG, struct messageq_transportshm_cmd_args)
+
+/* Command for messageq_transportshm_setup */
+#define CMD_MESSAGEQ_TRANSPORTSHM_SETUP \
+ _IOWR(MESSAGEQ_TRANSPORTSHM_IOC_MAGIC, MESSAGEQ_TRANSPORTSHM_SETUP, \
+ struct messageq_transportshm_cmd_args)
+
+/* Command for messageq_transportshm_destroy */
+#define CMD_MESSAGEQ_TRANSPORTSHM_DESTROY \
+ _IOWR(MESSAGEQ_TRANSPORTSHM_IOC_MAGIC, MESSAGEQ_TRANSPORTSHM_DESTROY, \
+ struct messageq_transportshm_cmd_args)
+
+/* Command for messageq_transportshm_params_init */
+#define CMD_MESSAGEQ_TRANSPORTSHM_PARAMS_INIT \
+ _IOWR(MESSAGEQ_TRANSPORTSHM_IOC_MAGIC, \
+ MESSAGEQ_TRANSPORTSHM_PARAMS_INIT, \
+ struct messageq_transportshm_cmd_args)
+
+/* Command for messageq_transportshm_create */
+#define CMD_MESSAGEQ_TRANSPORTSHM_CREATE \
+ _IOWR(MESSAGEQ_TRANSPORTSHM_IOC_MAGIC, MESSAGEQ_TRANSPORTSHM_CREATE, \
+ struct messageq_transportshm_cmd_args)
+
+/* Command for messageq_transportshm_delete */
+#define CMD_MESSAGEQ_TRANSPORTSHM_DELETE \
+ _IOWR(MESSAGEQ_TRANSPORTSHM_IOC_MAGIC, MESSAGEQ_TRANSPORTSHM_DELETE, \
+ struct messageq_transportshm_cmd_args)
+
+/* Command for messageq_transportshm_put */
+#define CMD_MESSAGEQ_TRANSPORTSHM_PUT \
+ _IOWR(MESSAGEQ_TRANSPORTSHM_IOC_MAGIC, MESSAGEQ_TRANSPORTSHM_PUT, \
+ struct messageq_transportshm_cmd_args)
+
+/* Command for messageq_transportshm_shared_mem_req */
+#define CMD_MESSAGEQ_TRANSPORTSHM_SHAREDMEMREQ \
+ _IOWR(MESSAGEQ_TRANSPORTSHM_IOC_MAGIC, \
+ MESSAGEQ_TRANSPORTSHM_SHAREDMEMREQ, \
+ struct messageq_transportshm_cmd_args)
+
+/* Command for messageq_transportshm_get_status */
+#define CMD_MESSAGEQ_TRANSPORTSHM_GETSTATUS \
+ _IOWR(MESSAGEQ_TRANSPORTSHM_IOC_MAGIC, \
+ MESSAGEQ_TRANSPORTSHM_GETSTATUS, struct messageq_transportshm_cmd_args)
+
+/* Command arguments for messageq_transportshm */
+struct messageq_transportshm_cmd_args {
+ union {
+ struct {
+ struct messageq_transportshm_config *config;
+ } get_config;
+
+ struct {
+ struct messageq_transportshm_config *config;
+ } setup;
+
+ struct {
+ void *messageq_transportshm_handle;
+ struct messageq_transportshm_params *params;
+ } params_init;
+
+ struct {
+ void *messageq_transportshm_handle;
+ u16 proc_id;
+ struct messageq_transportshm_params *params;
+ u32 shared_addr_srptr;
+ void *knl_lock_handle;
+ void *knl_notify_driver;
+ } create;
+
+ struct {
+ void *messageq_transportshm_handle;
+ } delete_transport;
+
+ struct {
+ void *messageq_transportshm_handle;
+ u32 *msg_srptr;
+ } put;
+
+ struct {
+ void *messageq_transportshm_handle;
+ enum messageq_transportshm_status status;
+ } get_status;
+
+ struct {
+ struct messageq_transportshm_params *params;
+ u32 bytes;
+ } shared_memreq;
+ } args;
+
+ int api_status;
+};
+
+/* ----------------------------------------------------------------------------
+ * IOCTL functions for messageq_transportshm module
+ * ----------------------------------------------------------------------------
+ */
+/*
+ * ioctl interface function for messageq_transportshm
+ */
+int messageq_transportshm_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+#endif /* _MESSAGEQ_TRANSPORTSHM_IOCTL_H_ */
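For context, a kernel-side handler behind these command IDs would copy messageq_transportshm_cmd_args across the user boundary, call the matching API and write api_status back. The sketch below shows that pattern for MESSAGEQ_TRANSPORTSHM_PUT only; it is illustrative, and the translation of the shared-region pointer (msg_srptr) to a kernel virtual address through the sharedregion module is deliberately omitted.

#include <linux/errno.h>
#include <linux/uaccess.h>
#include <messageq_transportshm_ioctl.h>

static int example_cmd_put(unsigned long args)
{
	struct messageq_transportshm_cmd_args cargs;
	void *msg;

	if (copy_from_user(&cargs, (void __user *)args, sizeof(cargs)))
		return -EFAULT;

	/* A real handler would convert the shared-region pointer to a kernel
	 * virtual address here; the direct cast is a placeholder only. */
	msg = (void *)cargs.args.put.msg_srptr;

	cargs.api_status = messageq_transportshm_put(
				cargs.args.put.messageq_transportshm_handle,
				msg);

	if (copy_to_user((void __user *)args, &cargs, sizeof(cargs)))
		return -EFAULT;

	return 0;
}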
diff --git a/arch/arm/plat-omap/include/syslink/multiproc.h b/arch/arm/plat-omap/include/syslink/multiproc.h
new file mode 100644
index 000000000000..6361bae80d19
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/multiproc.h
@@ -0,0 +1,83 @@
+/*
+* multiproc.h
+*
+* Many multi-processor modules have the concept of a processor id; multiproc
+* centralizes processor id management.
+*
+* Copyright (C) 2008-2009 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE.
+*/
+
+#ifndef _MULTIPROC_H_
+#define _MULTIPROC_H_
+
+#include <linux/types.h>
+
+
+#define VOLATILE volatile
+
+/*
+ * Unique module ID
+ */
+#define MULTIPROC_MODULEID (u16)0xB522
+
+/* Macro to define invalid processor id */
+#define MULTIPROC_INVALIDID ((u16)0xFFFF)
+
+/*
+ * Maximum number of processors in the system
+ * OMAP4 has 4 processors on a single SoC.
+ */
+#define MULTIPROC_MAXPROCESSORS 4
+
+/*
+ * Max name length for a processor name
+ */
+#define MULTIPROC_MAXNAMELENGTH 32
+
+/*
+ * Configuration structure for multiproc module
+ */
+struct multiproc_config {
+ s32 max_processors; /* Max number of processors for a particular system */
+ char name_list[MULTIPROC_MAXPROCESSORS][MULTIPROC_MAXNAMELENGTH];
+ /* Name List for processors in the system */
+ u16 id; /* Local Proc ID. This needs to be set before calling any
+ other APIs */
+};
+
+/* =============================================================================
+ * APIs
+ * =============================================================================
+ */
+
+/* Function to get the default configuration for the multiproc module. */
+void multiproc_get_config(struct multiproc_config *cfg);
+
+/* Function to setup the multiproc Module */
+s32 multiproc_setup(struct multiproc_config *cfg);
+
+/* Function to destroy the multiproc module */
+s32 multiproc_destroy(void);
+
+/* Function to set local processor Id */
+int multiproc_set_local_id(u16 proc_id);
+
+/* Function to get processor id from processor name. */
+u16 multiproc_get_id(const char *proc_name);
+
+/* Function to get name from processor id. */
+char *multiproc_get_name(u16 proc_id);
+
+/* Function to get maximum proc Id in the system. */
+u16 multiproc_get_max_processors(void);
+
+#endif /* _MULTIPROC_H_ */
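A sketch of how the local processor might configure multiproc before any other syslink module is used; the processor names, their ordering and the choice of local id 0 are assumptions for illustration (the header itself only fixes the counts and the name length), as is the convention that a negative return from multiproc_setup() means failure.

#include <linux/string.h>
#include <syslink/multiproc.h>

static int example_multiproc_init(void)
{
	struct multiproc_config cfg;

	multiproc_get_config(&cfg);
	cfg.max_processors = MULTIPROC_MAXPROCESSORS;
	/* Names and ordering below are illustrative only. */
	strncpy(cfg.name_list[0], "MPU", MULTIPROC_MAXNAMELENGTH);
	strncpy(cfg.name_list[1], "Tesla", MULTIPROC_MAXNAMELENGTH);
	strncpy(cfg.name_list[2], "SysM3", MULTIPROC_MAXNAMELENGTH);
	strncpy(cfg.name_list[3], "AppM3", MULTIPROC_MAXNAMELENGTH);
	cfg.id = 0;	/* local processor id; must be set before other calls */

	if (multiproc_setup(&cfg) < 0)
		return -1;

	return (multiproc_get_id("SysM3") == MULTIPROC_INVALIDID) ? -1 : 0;
}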
diff --git a/arch/arm/plat-omap/include/syslink/multiproc_ioctl.h b/arch/arm/plat-omap/include/syslink/multiproc_ioctl.h
new file mode 100644
index 000000000000..0c9780136b02
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/multiproc_ioctl.h
@@ -0,0 +1,94 @@
+/*
+* multiproc_ioctl.h
+*
+* This provides the ioctl interface for the multiproc module
+*
+* Copyright (C) 2008-2009 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE.
+*/
+
+#ifndef _MULTIPROC_IOCTL_H_
+#define _MULTIPROC_IOCTL_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <ipc_ioctl.h>
+#include <multiproc.h>
+
+enum CMD_MULTIPROC {
+ MULTIPROC_SETUP = MULTIPROC_BASE_CMD,
+ MULTIPROC_DESTROY,
+ MULTIPROC_GETCONFIG,
+ MULTIPROC_SETLOCALID
+};
+
+/* ----------------------------------------------------------------------------
+ * IOCTL command IDs for MultiProc
+ * ----------------------------------------------------------------------------
+ */
+
+/*
+ * Command for multiproc_setup
+ */
+#define CMD_MULTIPROC_SETUP _IOWR(IPC_IOC_MAGIC, MULTIPROC_SETUP, \
+ struct multiproc_cmd_args)
+
+/*
+ * Command for multiproc_destroy
+ */
+#define CMD_MULTIPROC_DESTROY _IOWR(IPC_IOC_MAGIC, MULTIPROC_DESTROY, \
+ struct multiproc_cmd_args)
+
+/*
+ * Command for multiproc_get_config
+ */
+#define CMD_MULTIPROC_GETCONFIG _IOWR(IPC_IOC_MAGIC, MULTIPROC_GETCONFIG, \
+ struct multiproc_cmd_args)
+
+/*
+ * Command for multiproc_set_local_id
+ */
+#define CMD_MULTIPROC_SETLOCALID _IOWR(IPC_IOC_MAGIC, MULTIPROC_SETLOCALID, \
+ struct multiproc_cmd_args)
+
+/*
+ * Command arguments for multiproc
+ */
+union multiproc_arg {
+ struct {
+ struct multiproc_config *config;
+ } get_config;
+
+ struct {
+ struct multiproc_config *config;
+ } setup;
+
+ struct {
+ u16 id;
+ } set_local_id;
+};
+
+/*
+ * Command arguments for multiproc
+ */
+struct multiproc_cmd_args {
+ union multiproc_arg args;
+ s32 api_status;
+};
+
+/*
+ * ioctl interface function for the multiproc module
+ */
+int multiproc_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+#endif /* _MULTIPROC_IOCTL_H_ */
diff --git a/arch/arm/plat-omap/include/syslink/nameserver.h b/arch/arm/plat-omap/include/syslink/nameserver.h
new file mode 100644
index 000000000000..87265d856d9b
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/nameserver.h
@@ -0,0 +1,131 @@
+/*
+ * nameserver.h
+ *
+ * The nameserver module manages local name/value pairs that
+ * enable an application and other modules to store and retrieve
+ * values based on a name.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _NAMESERVER_H_
+#define _NAMESERVER_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+/*
+ * NAMESERVER_MODULEID
+ * Unique module ID
+ */
+#define NAMESERVER_MODULEID (0xF414)
+
+/*
+ * Instance config-params object.
+ */
+struct nameserver_params {
+ bool check_existing; /* Prevents duplicate entries from being added to the table */
+ void *gate_handle; /* Lock used for critical region of the table */
+ u16 max_name_len; /* Length, in MAUs, of name field */
+ u32 max_runtime_entries;
+ u32 max_value_len; /* Length, in MAUs, of the value field */
+ void *table_heap; /* Table is placed into a section on dyn creates */
+};
+
+/*
+ * Function to setup the nameserver module
+ */
+int nameserver_setup(void);
+
+/*
+ * Function to destroy the nameserver module
+ */
+int nameserver_destroy(void);
+
+/*
+ * Function to initialize the parameter structure
+ */
+int nameserver_params_init(struct nameserver_params *params);
+
+/*
+ * Function to retrieve the current parameters of a name server instance
+ */
+int nameserver_get_params(void *handle,
+ struct nameserver_params *params);
+
+/*
+ * Function to create a name server
+ */
+void *nameserver_create(const char *name,
+ const struct nameserver_params *params);
+
+/*
+ * Function to delete a name server
+ */
+int nameserver_delete(void **handle);
+
+/*
+ * Function to add a variable length value into the local table
+ */
+void *nameserver_add(void *handle, const char *name,
+ void *buffer, u32 length);
+
+/*
+ * Function to add a 32 bit value into the local table
+ */
+void *nameserver_add_uint32(void *handle,
+ const char *name, u32 value);
+
+/*
+ * Function to retrieve the value portion of a name/value pair
+ */
+int nameserver_get(void *handle, const char *name,
+ void *buffer, u32 length, u16 proc_id[]);
+
+/*
+ * Function to get the value portion of a name/value pair from local table
+ */
+int nameserver_get_local(void *handle, const char *name,
+ void *buffer, u32 length);
+
+/*
+ * Function to remove a name/value pair from the table
+ */
+int nameserver_remove(void *handle, const char *name);
+
+/*
+ * Function to remove an entry from the table
+ */
+int nameserver_remove_entry(void *nshandle, void *nsentry);
+
+/*
+ * Function to get the handle of a name server instance from its name
+ */
+void *nameserver_get_handle(const char *name);
+
+/*
+ * Function to match a name and retrieve its value
+ */
+int nameserver_match(void *handle, const char *name, u32 *value);
+
+/*
+ * Function to register a remote driver
+ */
+int nameserver_register_remote_driver(void *handle, u16 proc_id);
+
+/*
+ * Function to unregister a remote driver
+ */
+int nameserver_unregister_remote_driver(u16 proc_id);
+
+#endif /* _NAMESERVER_H_ */
+
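A minimal sketch of the local create/add/look-up/delete cycle these declarations describe; the table name, entry name and 32-bit value are placeholders, and the return-value conventions (NULL and negative values taken as failures) are assumptions based on the prototypes above.

#include <syslink/nameserver.h>

static int example_nameserver_usage(void)
{
	struct nameserver_params params;
	void *ns;
	u32 value = 0;
	int status = -1;

	nameserver_params_init(&params);
	params.max_value_len = sizeof(u32);

	ns = nameserver_create("example_table", &params);	/* name is illustrative */
	if (ns == NULL)
		return -1;

	if (nameserver_add_uint32(ns, "frame_base", 0x9c000000) != NULL)
		status = nameserver_get_local(ns, "frame_base",
						&value, sizeof(value));

	nameserver_delete(&ns);
	return status;
}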
diff --git a/arch/arm/plat-omap/include/syslink/nameserver_ioctl.h b/arch/arm/plat-omap/include/syslink/nameserver_ioctl.h
new file mode 100644
index 000000000000..defb71fae47b
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/nameserver_ioctl.h
@@ -0,0 +1,230 @@
+/*
+* nameserver_ioctl.h
+*
+* This provides the ioctl interface for the nameserver module
+*
+* Copyright (C) 2008-2009 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE.
+*/
+
+#ifndef _NAMESERVER_IOCTL_H_
+#define _NAMESERVER_IOCTL_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <ipc_ioctl.h>
+#include <nameserver.h>
+
+enum CMD_NAMESERVER {
+ NAMESERVER_SETUP = NAMESERVER_BASE_CMD,
+ NAMESERVER_DESTROY,
+ NAMESERVER_PARAMS_INIT,
+ NAMESERVER_CREATE,
+ NAMESERVER_DELETE,
+ NAMESERVER_ADD,
+ NAMESERVER_ADDUINT32,
+ NAMESERVER_GET,
+ NAMESERVER_GETLOCAL,
+ NAMESERVER_MATCH,
+ NAMESERVER_REMOVE,
+ NAMESERVER_REMOVEENTRY,
+ NAMESERVER_GETHANDLE,
+};
+
+/*
+ * IOCTL command IDs for nameserver
+ *
+ */
+/*
+ * Command for nameserver_setup
+ */
+#define CMD_NAMESERVER_SETUP _IOWR(IPC_IOC_MAGIC, NAMESERVER_SETUP, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_destroy
+ */
+#define CMD_NAMESERVER_DESTROY _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVER_DESTROY, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_params_init
+ */
+#define CMD_NAMESERVER_PARAMS_INIT _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVER_PARAMS_INIT, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_create
+ */
+#define CMD_NAMESERVER_CREATE _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVER_CREATE, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_delete
+ */
+#define CMD_NAMESERVER_DELETE _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVER_DELETE, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_add
+ */
+#define CMD_NAMESERVER_ADD _IOWR(IPC_IOC_MAGIC, NAMESERVER_ADD, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_add_uint32
+ */
+#define CMD_NAMESERVER_ADDUINT32 _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVER_ADDUINT32, \
+ struct nameserver_cmd_args)
+/*
+ * Command for nameserver_get
+ */
+#define CMD_NAMESERVER_GET _IOWR(IPC_IOC_MAGIC, NAMESERVER_GET, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_get_local
+ */
+#define CMD_NAMESERVER_GETLOCAL _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVER_GETLOCAL, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_match
+ */
+#define CMD_NAMESERVER_MATCH _IOWR(IPC_IOC_MAGIC, NAMESERVER_MATCH, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_remove
+ */
+#define CMD_NAMESERVER_REMOVE _IOWR(IPC_IOC_MAGIC, NAMESERVER_REMOVE,\
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_remove_entry
+ */
+#define CMD_NAMESERVER_REMOVEENTRY _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVER_REMOVEENTRY, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command for nameserver_get_handle
+ */
+#define CMD_NAMESERVER_GETHANDLE _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVER_GETHANDLE, \
+ struct nameserver_cmd_args)
+
+/*
+ * Command arguments for nameserver
+ */
+union nameserver_arg {
+ struct {
+ struct nameserver_params *params;
+ } params_init;
+
+ struct {
+ void *handle;
+ char *name;
+ u32 name_len;
+ struct nameserver_params *params;
+ } create;
+
+ struct {
+ void *handle;
+ } delete_instance;
+
+ struct {
+ void *handle;
+ char *name;
+ u32 name_len;
+ void *buf;
+ s32 len;
+ void *entry;
+ struct nameserver_entry *node;
+ } add;
+
+ struct {
+ void *handle;
+ char *name;
+ u32 name_len;
+ u32 value;
+ void *entry;
+ } addu32;
+
+ struct {
+ void *handle;
+ char *name;
+ u32 name_len;
+ void *buf;
+ u32 len;
+ u16 *proc_id;
+ u32 proc_len;
+ u32 count;
+ } get;
+
+ struct {
+ void *handle;
+ char *name;
+ u32 name_len;
+ void *buf;
+ u32 len;
+ u32 count;
+ } get_local;
+
+ struct {
+ void *handle;
+ char *name;
+ u32 name_len;
+ u32 *value;
+ u32 count;
+ } match;
+
+ struct {
+ void *handle;
+ char *name;
+ u32 name_len;
+ } remove;
+
+ struct {
+ void *handle;
+ void *entry;
+ } remove_entry;
+
+ struct {
+ void *handle;
+ char *name;
+ u32 name_len;
+ } get_handle;
+};
+
+/*
+ * Command arguments for nameserver
+ */
+struct nameserver_cmd_args {
+ union nameserver_arg args;
+ s32 api_status;
+};
+
+/*
+ * ioctl interface function for the nameserver module
+ */
+int nameserver_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+#endif /* _NAMESERVER_IOCTL_H_ */
+
diff --git a/arch/arm/plat-omap/include/syslink/nameserver_remote.h b/arch/arm/plat-omap/include/syslink/nameserver_remote.h
new file mode 100644
index 000000000000..dbdcedfc7ac3
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/nameserver_remote.h
@@ -0,0 +1,39 @@
+/*
+ * nameserver_remote.h
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _NAMESERVER_REMOTE_H_
+#define _NAMESERVER_REMOTE_H_
+
+#include <linux/types.h>
+
+/*
+ * Structure defining object for the nameserver remote driver
+ */
+struct nameserver_remote_object {
+ int (*get)(const struct nameserver_remote_object *obj,
+ const char *instance_name, const char *name,
+ void *value, u32 value_len, void *reserved);
+ /* Function to get data from remote nameserver */
+ void *obj; /* Implementation specific object */
+};
+
+/*
+ * Function to get data from a remote name server
+ */
+int nameserver_remote_get(const struct nameserver_remote_object *handle,
+ const char *instance_name, const char *name,
+ void *value, u32 value_len);
+
+#endif /* _NAMESERVER_REMOTE_H_ */
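The object above is the hook a remote-transport module hands to the nameserver. A sketch of a stub implementation is shown below; the -ENOENT return for a missing entry is an assumption, since the module's own status convention is defined elsewhere, and example_remote_driver is purely illustrative.

#include <linux/errno.h>
#include <syslink/nameserver_remote.h>

/* Stub: a real driver would query the remote processor via its transport. */
static int example_remote_get(const struct nameserver_remote_object *obj,
				const char *instance_name, const char *name,
				void *value, u32 value_len, void *reserved)
{
	return -ENOENT;	/* "not found"; real status codes may differ */
}

static struct nameserver_remote_object example_remote_driver = {
	.get = example_remote_get,
	.obj = NULL,	/* driver-private state would be stored here */
};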
diff --git a/arch/arm/plat-omap/include/syslink/nameserver_remotenotify.h b/arch/arm/plat-omap/include/syslink/nameserver_remotenotify.h
new file mode 100644
index 000000000000..852da8effcc6
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/nameserver_remotenotify.h
@@ -0,0 +1,100 @@
+/*
+ * nameserver_remotenotify.h
+ *
+ * The nameserver_remotenotify module provides functionality to get a
+ * name/value pair from a remote nameserver.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _NAMESERVER_REMOTENOTIFY_H_
+#define _NAMESERVER_REMOTENOTIFY_H_
+
+#include <linux/types.h>
+
+/*
+ * NAMESERVERREMOTENOTIFY_MODULEID
+ * Unique module ID
+ */
+#define NAMESERVERREMOTENOTIFY_MODULEID (0x08FD)
+
+/*
+ * Module configuration structure
+ */
+struct nameserver_remotenotify_config {
+ u32 reserved;
+ /* Reserved value (not currently used) */
+};
+
+/*
+ * Instance creation parameters for the nameserver_remotenotify module
+ */
+struct nameserver_remotenotify_params {
+ u32 notify_event_no; /* Notify event number */
+ void *notify_driver; /* Notify Driver handle */
+ void *shared_addr; /* Address of the shared memory */
+ u32 shared_addr_size; /* Size of the shared memory */
+ void *gate; /* Handle to the gate used for protecting
+ nameserver add and delete */
+};
+
+/*
+ * Function to get the default configuration for the nameserver_remotenotify
+ * module
+ */
+void nameserver_remotenotify_get_config(
+ struct nameserver_remotenotify_config *cfg);
+
+/*
+ * Function to setup the nameserver_remotenotify module
+ */
+int nameserver_remotenotify_setup(struct nameserver_remotenotify_config *cfg);
+
+/*
+ * Function to destroy the nameserver_remotenotify module
+ */
+int nameserver_remotenotify_destroy(void);
+
+/*
+ * Function to initialize the instance parameters with default values
+ */
+void nameserver_remotenotify_params_init(void *handle,
+ struct nameserver_remotenotify_params *params);
+
+/*
+ * Function to create an instance of the Name Server remote notify
+ */
+void *nameserver_remotenotify_create(u16 proc_id,
+ const struct nameserver_remotenotify_params *params);
+
+/*
+ * Function to delete an instance of the Name Server remote notify
+ */
+int nameserver_remotenotify_delete(void **handle);
+
+
+/*
+ * Function to get a name/value from remote nameserver
+ */
+int nameserver_remotenotify_get(void *handle,
+ const char *instance_name, const char *name,
+ void *value, u32 value_len, void *reserved);
+
+/*
+ * Get the shared memory requirements for the nameserver_remotenotify
+ */
+u32 nameserver_remotenotify_shared_memreq(
+ const struct nameserver_remotenotify_params *params);
+
+
+#endif /* _NAMESERVER_REMOTENOTIFY_H_ */
+
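A sketch of creating one remotenotify instance per remote processor, mirroring the transport bring-up above: the caller supplies shared memory, a notify driver and an event number, and can validate the shared region size against nameserver_remotenotify_shared_memreq() first. The event number 4 and the NULL handle passed to params_init to request defaults are assumptions.

#include <syslink/nameserver_remotenotify.h>

static void *example_remotenotify_create(u16 remote_proc_id, void *shared_va,
					 u32 shared_size, void *notify_drv)
{
	struct nameserver_remotenotify_params params;

	/* NULL handle to request default parameters is an assumption. */
	nameserver_remotenotify_params_init(NULL, &params);
	params.notify_event_no = 4;	/* illustrative event number */
	params.notify_driver = notify_drv;
	params.shared_addr = shared_va;
	params.shared_addr_size = shared_size;

	if (nameserver_remotenotify_shared_memreq(&params) > shared_size)
		return NULL;	/* not enough shared memory supplied */

	return nameserver_remotenotify_create(remote_proc_id, &params);
}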
diff --git a/arch/arm/plat-omap/include/syslink/nameserver_remotenotify_ioctl.h b/arch/arm/plat-omap/include/syslink/nameserver_remotenotify_ioctl.h
new file mode 100644
index 000000000000..e8a355456c09
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/nameserver_remotenotify_ioctl.h
@@ -0,0 +1,163 @@
+/*
+ * nameserver_remotenotify_ioctl.h
+ *
+ * The nameserver_remotenotify module provides functionality to get a
+ * name/value pair from a remote nameserver.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _NAMESERVERREMOTENOTIFY_DRVDEFS_H
+#define _NAMESERVERREMOTENOTIFY_DRVDEFS_H
+
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <ipc_ioctl.h>
+#include <nameserver_remotenotify.h>
+
+enum CMD_NAMESERVERREMOTENOTIFY {
+ NAMESERVERREMOTENOTIFY_GETCONFIG = NAMESERVERREMOTENOTIFY_BASE_CMD,
+ NAMESERVERREMOTENOTIFY_SETUP,
+ NAMESERVERREMOTENOTIFY_DESTROY,
+ NAMESERVERREMOTENOTIFY_PARAMS_INIT,
+ NAMESERVERREMOTENOTIFY_CREATE,
+ NAMESERVERREMOTENOTIFY_DELETE,
+ NAMESERVERREMOTENOTIFY_GET,
+ NAMESERVERREMOTENOTIFY_SHAREDMEMREQ
+};
+
+
+/*
+ * IOCTL command IDs for nameserver_remotenotify
+ *
+ */
+
+/*
+ * Command for nameserver_remotenotify_get_config
+ */
+#define CMD_NAMESERVERREMOTENOTIFY_GETCONFIG _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVERREMOTENOTIFY_GETCONFIG, \
+ struct nameserver_remotenotify_cmd_args)
+/*
+ * Command for nameserver_remotenotify_setup
+ */
+#define CMD_NAMESERVERREMOTENOTIFY_SETUP _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVERREMOTENOTIFY_SETUP, \
+ struct nameserver_remotenotify_cmd_args)
+
+/*
+ * Command for nameserver_remotenotify_destroy
+ */
+#define CMD_NAMESERVERREMOTENOTIFY_DESTROY _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVERREMOTENOTIFY_DESTROY, \
+ struct nameserver_remotenotify_cmd_args)
+
+/*
+ * Command for nameserver_remotenotify_params_init
+ */
+#define CMD_NAMESERVERREMOTENOTIFY_PARAMS_INIT _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVERREMOTENOTIFY_PARAMS_INIT, \
+ struct nameserver_remotenotify_cmd_args)
+
+/*
+ * Command for nameserver_remotenotify_create
+ */
+#define CMD_NAMESERVERREMOTENOTIFY_CREATE _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVERREMOTENOTIFY_CREATE, \
+ struct nameserver_remotenotify_cmd_args)
+
+/*
+ * Command for nameserver_remotenotify_delete
+ */
+#define CMD_NAMESERVERREMOTENOTIFY_DELETE _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVERREMOTENOTIFY_DELETE, \
+ struct nameserver_remotenotify_cmd_args)
+
+/*
+ * Command for nameserver_remotenotify_get
+ */
+#define CMD_NAMESERVERREMOTENOTIFY_GET _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVERREMOTENOTIFY_GET, \
+ struct nameserver_remotenotify_cmd_args)
+
+/*
+ * Command for nameserver_remotenotify_shared_memreq
+ */
+#define CMD_NAMESERVERREMOTENOTIFY_SHAREDMEMREQ _IOWR(IPC_IOC_MAGIC, \
+ NAMESERVERREMOTENOTIFY_SHAREDMEMREQ, \
+ struct nameserver_remotenotify_cmd_args)
+
+/*
+ * Command arguments for nameserver_remotenotify
+ */
+union nameserver_remotenotify_arg {
+ struct {
+ struct nameserver_remotenotify_config *config;
+ } get_config;
+
+ struct {
+ struct nameserver_remotenotify_config *config;
+ } setup;
+
+ struct {
+ void *handle;
+ struct nameserver_remotenotify_params *params;
+ } params_init;
+
+ struct {
+ void *handle;
+ u16 proc_id;
+ struct nameserver_remotenotify_params *params;
+ } create;
+
+ struct {
+ void *handle;
+ } delete_instance;
+
+ struct {
+ void *handle;
+ char *instance_name;
+ u32 instance_name_len;
+ char *name;
+ u32 name_len;
+ u8 *value;
+ s32 value_len;
+ void *reserved;
+ s32 len;
+ } get;
+
+ struct {
+ void *handle;
+ struct nameserver_remotenotify_params *params;
+ u32 shared_mem_size;
+ } shared_memreq;
+};
+
+/*
+ * Command arguments for nameserver_remotenotify
+ */
+struct nameserver_remotenotify_cmd_args {
+ union nameserver_remotenotify_arg args;
+ s32 api_status;
+};
+
+/*
+ * This ioctl interface for nameserver_remotenotify module
+ */
+int nameserver_remotenotify_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+
+#endif /* _NAMESERVERREMOTENOTIFY_DRVDEFS_H */
+
diff --git a/arch/arm/plat-omap/include/syslink/notify.h b/arch/arm/plat-omap/include/syslink/notify.h
new file mode 100644
index 000000000000..6cf0e943b85d
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/notify.h
@@ -0,0 +1,267 @@
+/*
+ * notify.h
+ *
+ * Notify driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#if !defined NOTIFY_H
+#define NOTIFY_H
+
+#include <syslink/host_os.h>
+
+#define NOTIFY_MAX_DRIVERS 16
+
+/*
+ * desc Maximum length of the name of Notify drivers, inclusive of NULL
+ * string terminator.
+ *
+ */
+#define NOTIFY_MAX_NAMELEN 32
+
+#define NOTIFY_MODULEID 0x5f84
+
+/*
+ * Status code base for Notify module.
+ */
+#define NOTIFY_STATUSCODEBASE (NOTIFY_MODULEID << 12u)
+
+/*
+ * Macro to make error code.
+ */
+#define NOTIFY_MAKE_FAILURE(x) ((int)(0x80000000\
+ | (NOTIFY_STATUSCODEBASE + (x))))
+
+/*
+ * Macro to make success code.
+ */
+#define NOTIFY_MAKE_SUCCESS(x) (NOTIFY_STATUSCODEBASE + (x))
+
+/*
+ * Generic failure.
+ */
+#define NOTIFY_E_FAIL NOTIFY_MAKE_FAILURE(1)
+
+/*
+ * A timeout occurred while performing the specified operation.
+ */
+#define NOTIFY_E_TIMEOUT NOTIFY_MAKE_FAILURE(2)
+
+/*
+ * Configuration failure.
+ */
+#define NOTIFY_E_CONFIG NOTIFY_MAKE_FAILURE(3)
+
+/*
+ * The module is already initialized
+ */
+#define NOTIFY_E_ALREADYINIT NOTIFY_MAKE_FAILURE(4)
+
+/*
+ * Unable to find the specified entity (e.g. registered event, driver).
+ */
+#define NOTIFY_E_NOTFOUND NOTIFY_MAKE_FAILURE(5)
+
+/*
+ * The specified operation is not supported.
+ */
+#define NOTIFY_E_NOTSUPPORTED NOTIFY_MAKE_FAILURE(6)
+
+/*
+ * Invalid event number specified to the Notify operation.
+ */
+#define NOTIFY_E_INVALIDEVENT NOTIFY_MAKE_FAILURE(7)
+
+/*
+ * Invalid pointer provided.
+ */
+#define NOTIFY_E_POINTER NOTIFY_MAKE_FAILURE(8)
+/*
+ * The specified value is out of valid range.
+ */
+#define NOTIFY_E_RANGE NOTIFY_MAKE_FAILURE(9)
+
+/* An invalid handle was provided.
+ */
+#define NOTIFY_E_HANDLE NOTIFY_MAKE_FAILURE(10)
+
+/*
+ * An invalid argument was provided to the API.
+ */
+#define NOTIFY_E_INVALIDARG NOTIFY_MAKE_FAILURE(11)
+
+/*
+ * A memory allocation failure occurred.
+ */
+#define NOTIFY_E_MEMORY NOTIFY_MAKE_FAILURE(12)
+
+/*
+ * The module has not been setup.
+ */
+#define NOTIFY_E_INVALIDSTATE NOTIFY_MAKE_FAILURE(13)
+
+/*
+ * Maximum number of supported drivers have already been registered.
+ */
+#define NOTIFY_E_MAXDRIVERS NOTIFY_MAKE_FAILURE(14)
+
+/*
+ * Invalid attempt to use a reserved event number.
+ */
+#define NOTIFY_E_RESERVEDEVENT NOTIFY_MAKE_FAILURE(15)
+
+/*
+ * The specified entity (e.g. driver) already exists.
+ */
+#define NOTIFY_E_ALREADYEXISTS NOTIFY_MAKE_FAILURE(16)
+
+/*
+ * The Notify driver has not been initialized.
+ */
+#define NOTIFY_E_DRIVERINIT NOTIFY_MAKE_FAILURE(17)
+
+/*
+ * The remote processor is not ready to receive the event.
+ */
+#define NOTIFY_E_NOTREADY NOTIFY_MAKE_FAILURE(18)
+
+/*
+ * Failed to register driver with Notify module.
+ */
+#define NOTIFY_E_REGDRVFAILED NOTIFY_MAKE_FAILURE(19)
+
+/*
+ * Failed to unregister driver with Notify module.
+ */
+#define NOTIFY_E_UNREGDRVFAILED NOTIFY_MAKE_FAILURE(20)
+
+/*
+ * Failure in an OS-specific operation.
+ */
+#define NOTIFY_E_OSFAILURE NOTIFY_MAKE_FAILURE(21)
+
+/*
+ * Maximum number of supported events have already been registered.
+ */
+#define NOTIFY_E_MAXEVENTS NOTIFY_MAKE_FAILURE(22)
+
+/* Maximum number of supported user clients have already been
+ * registered.
+ */
+#define NOTIFY_E_MAXCLIENTS NOTIFY_MAKE_FAILURE(23)
+
+/* Operation is successful.
+ */
+#define NOTIFY_SUCCESS NOTIFY_MAKE_SUCCESS(0)
+
+/* The Notify module has already been setup in this process.
+ */
+#define NOTIFY_S_ALREADYSETUP NOTIFY_MAKE_SUCCESS(1)
+
+/* Other Notify clients still have the Notify module setup.
+ */
+#define NOTIFY_S_SETUP NOTIFY_MAKE_SUCCESS(2)
+
+/* Other Notify handles are still open in this process.
+ */
+#define NOTIFY_S_OPENHANDLE NOTIFY_MAKE_SUCCESS(3)
+
+/* The Notify instance has already been created/opened in this process
+ */
+#define NOTIFY_S_ALREADYEXISTS NOTIFY_MAKE_SUCCESS(4)
+
+/* Maximum depth for nesting Notify_disable / Notify_restore calls.
+ */
+#define NOTIFY_MAXNESTDEPTH 2
+
+
+/* Macro to make a correct module magic number with refCount */
+#define NOTIFY_MAKE_MAGICSTAMP(x) ((NOTIFY_MODULEID << 12u) | (x))
+
+
+/*
+ * const NOTIFYMBXDRV_DRIVERNAME
+ *
+ * desc Name of the Notify Shared Memory Mailbox driver.
+ *
+ */
+#define NOTIFYMBXDRV_DRIVERNAME "NOTIFYMBXDRV"
+
+#define REG volatile
+/*
+ * const NOTIFYSHMDRV_RESERVED_EVENTS
+ *
+ * desc Maximum number of events marked as reserved events by the
+ * notify_shmdrv driver.
+ * If required, this value can be changed by the system integrator.
+ *
+ */
+#define NOTIFYSHMDRV_RESERVED_EVENTS 3
+
+/*
+ * This key must be provided as the upper 16 bits of the eventNo when
+ * registering for an event, if any reserved event numbers are to be
+ * used.
+ */
+#define NOTIFY_SYSTEM_KEY 0xC1D2
+
+struct notify_config {
+ u32 maxDrivers;
+ /* Maximum number of drivers that can be created for Notify at a time */
+};
+
+typedef void (*notify_callback_fxn)(u16 proc_id, u32 eventNo, void *arg,
+ u32 payload);
+
+extern struct notify_module_object notify_state;
+
+/* Function to get the default configuration for the Notify module. */
+void notify_get_config(struct notify_config *cfg);
+
+/* Function to setup the Notify Module */
+int notify_setup(struct notify_config *cfg);
+
+/* Function to destroy the Notify module */
+int notify_destroy(void);
+
+/* Function to register an event */
+int notify_register_event(void *notify_driver_handle, u16 proc_id,
+ u32 event_no,
+ notify_callback_fxn notify_callback_fxn,
+ void *cbck_arg);
+
+/* Function to unregister an event */
+int notify_unregister_event(void *notify_driver_handle, u16 proc_id,
+ u32 event_no,
+ notify_callback_fxn notify_callback_fxn,
+ void *cbck_arg);
+
+/* Function to send an event to other processor */
+int notify_sendevent(void *notify_driver_handle, u16 proc_id,
+ u32 event_no, u32 payload, bool wait_clear);
+
+/* Function to disable Notify module */
+u32 notify_disable(u16 procId);
+
+/* Function to restore Notify module state */
+void notify_restore(u32 key, u16 proc_id);
+
+/* Function to disable particular event */
+void notify_disable_event(void *notify_driver_handle, u16 proc_id,
+ u32 event_no);
+
+/* Function to enable particular event */
+void notify_enable_event(void *notify_driver_handle, u16 proc_id, u32 event_no);
+
+#endif /* !defined NOTIFY_H */
+
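To illustrate how the event API above fits together, the sketch below registers a callback for one event from a remote processor and then signals the same event back with a payload. The driver handle, processor id, event number 5 and payload are assumptions, and the exact wait_clear semantics are only hinted at by the prototype.

#include <linux/kernel.h>
#include <syslink/notify.h>

static void example_notify_cb(u16 proc_id, u32 event_no, void *arg, u32 payload)
{
	/* Runs when the remote processor sends event_no to this processor. */
	pr_info("notify: event %u from proc %u, payload 0x%x\n",
		event_no, proc_id, payload);
}

static int example_notify_usage(void *drv_handle, u16 remote_proc_id)
{
	int status;

	status = notify_register_event(drv_handle, remote_proc_id,
					5 /* event number: assumption */,
					example_notify_cb, NULL);
	if (status < 0)
		return status;

	/* wait_clear = true is assumed to block until the previous payload
	 * for this event has been read by the remote processor. */
	return notify_sendevent(drv_handle, remote_proc_id, 5, 0x1234, true);
}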
diff --git a/arch/arm/plat-omap/include/syslink/notify_dispatcher.h b/arch/arm/plat-omap/include/syslink/notify_dispatcher.h
new file mode 100644
index 000000000000..efd87315815e
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/notify_dispatcher.h
@@ -0,0 +1,158 @@
+/*
+ * notify_dispatcher.h
+ *
+ * Notify driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#ifndef __TMBX_H__
+#define __TMBX_H__
+
+
+#include <syslink/notifydefs.h>
+#include <linux/interrupt.h>
+
+#include <syslink/notifyerr.h>
+
+#define MAX_MBOX_MODULES 2
+#define MAX_MBOX_ISRS 32
+#define KErrNone 0
+#define KErrNotSupported 1
+#define KErrNotReady 2
+#define KErrArgument 2
+
+typedef void (*isr_call_back)(void *);
+
+struct mbox_config {
+ unsigned long int mbox_linear_addr;
+ unsigned long int mbox_modules;
+ signed long int interrupt_lines[MAX_MBOX_MODULES];
+ signed long int mailboxes[MAX_MBOX_MODULES];
+};
+
+struct mbox_isrs {
+ signed long int isrNo[MAX_MBOX_MODULES];
+ /* TODO: Remove this - seems to be unused.*/
+ isr_call_back isrs[MAX_MBOX_MODULES][MAX_MBOX_ISRS];
+ void *isr_params[MAX_MBOX_MODULES][MAX_MBOX_ISRS];
+};
+
+extern const unsigned long *linear_address;
+
+irqreturn_t notify_mailbx0_user0_isr(int temp, void *anArg, struct pt_regs *p);
+
+/*
+ *func ntfy_disp_bind_interrupt
+ *
+ * desc Bind an ISR to the HW interrupt line coming into the processor
+ */
+int ntfy_disp_bind_interrupt(int interrupt_no,
+ isr_call_back hw_isr,
+ void *isr_arg);
+
+
+/*
+ * desc Print the mailbox registers and other useful debug information
+ *
+ */
+void ntfy_disp_debug(void);
+
+
+/*
+ * func ntfy_disp_deinit
+ * desc Uninitialize the Mailbox Manager module
+ */
+int ntfy_disp_deinit(void);
+
+
+/*
+ * desc Return the pointer to the Mailbox Manager's configuration object
+ */
+struct mbox_config *ntfy_disp_get_config(void);
+
+
+/*
+ * desc Initialize the Mailbox Manager module
+ */
+int ntfy_disp_init(void);
+
+
+/*
+ * desc Disable a particular IRQ bit on a Mailbox IRQ Enable Register
+ */
+int ntfy_disp_interrupt_disable(unsigned long int mbox_module_no,
+ int a_irq_bit);
+
+
+/*
+ * desc Enable a particular IRQ bit on a Mailbox IRQ Enable Register
+ */
+int ntfy_disp_interrupt_enable(unsigned long int mbox_module_no,
+ int a_irq_bit);
+
+
+/*
+ * desc Read a message on a Mailbox FIFO queue
+ */
+int ntfy_disp_read(unsigned long int mbox_module_no,
+ int a_mbox_no,
+ int *messages,
+ int *num_messages,
+ short int read_all);
+
+
+/*
+ * func ntfy_disp_register
+ * desc Register an ISR callback associated with a particular IRQ bit on a
+ * Mailbox IRQ Enable Register
+ */
+int ntfy_disp_register(unsigned long int mbox_module_no,
+ int a_irq_bit,
+ isr_call_back isr_cbck_fn,
+ void *isrCallbackArgs);
+
+
+/*
+ * func ntfy_disp_send
+ * desc Send a message on a Mailbox FIFO queue
+ */
+int ntfy_disp_send(unsigned long int mbox_module_no,
+ int a_mbox_no,
+ int message);
+
+
+/*
+ * func ntfy_disp_unbind_interrupt
+ * desc Unbind the ISR from the HW interrupt line coming into the processor
+ */
+int ntfy_disp_unbind_interrupt(int interrupt_no);
+
+
+/*
+ * func ntfy_disp_unregister
+ * desc Unregister an ISR callback associated with a particular IRQ bit on a
+ * Mailbox IRQ Enable Register
+ */
+int ntfy_disp_unregister(unsigned long int mbox_module_no,
+ int a_irq_bit);
+
+/*
+ * func notify_mailbx0_user0_isr
+ * desc Mailbox ISR for user 0 of mailbox module 0
+ *
+ */
+
+irqreturn_t notify_mailbx0_user0_isr(int temp, void *anArg, struct pt_regs *p);
+
+
+#endif
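A sketch of the dispatcher's intended use: initialize it, register a callback against one IRQ bit of one mailbox module, then enable that bit. The module index 0 and IRQ bit 0 are assumptions; real values would come from the mbox_config returned by ntfy_disp_get_config().

#include <syslink/notify_dispatcher.h>

static void example_mbox_callback(void *arg)
{
	/* Invoked by the dispatcher when the registered IRQ bit fires. */
}

static int example_dispatcher_usage(void)
{
	int status;

	status = ntfy_disp_init();
	if (status != KErrNone)
		return status;

	/* Mailbox module 0, IRQ bit 0: both indices are assumptions. */
	status = ntfy_disp_register(0, 0, example_mbox_callback, NULL);
	if (status != KErrNone)
		return status;

	return ntfy_disp_interrupt_enable(0, 0);
}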
diff --git a/arch/arm/plat-omap/include/syslink/notify_driver.h b/arch/arm/plat-omap/include/syslink/notify_driver.h
new file mode 100644
index 000000000000..30a150174654
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/notify_driver.h
@@ -0,0 +1,44 @@
+/*
+ * notify_driver.h
+ *
+ * Notify driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#if !defined NOTIFYDRIVER_H
+#define NOTIFYDRIVER_H
+
+#include<linux/list.h>
+
+/* ----------------------------------- Notify */
+#include <syslink/notifyerr.h>
+
+/* ----------------------------------- Notify driver */
+#include <syslink/notify_driverdefs.h>
+
+/* Function to register notify driver */
+int notify_register_driver(char *driver_name,
+ struct notify_interface *fn_table,
+ struct notify_driver_attrs *drv_attrs,
+ struct notify_driver_object **driver_handle);
+
+
+/* Function to unregister notify driver */
+int notify_unregister_driver(struct notify_driver_object *drv_handle);
+
+/* Function to find the driver in the list of drivers */
+int notify_get_driver_handle(char *driver_name,
+ struct notify_driver_object **handle);
+
+#endif /* !defined (NOTIFYDRIVER_H) */
+
diff --git a/arch/arm/plat-omap/include/syslink/notify_driverdefs.h b/arch/arm/plat-omap/include/syslink/notify_driverdefs.h
new file mode 100644
index 000000000000..0e79562d0867
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/notify_driverdefs.h
@@ -0,0 +1,440 @@
+/*
+ * notify_driverdefs.h
+ *
+ * Notify driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#if !defined NOTIFYDRIVERDEFS_H
+#define NOTIFYDRIVERDEFS_H
+
+
+#include <syslink/host_os.h>
+
+/* ----------------------------------- Notify */
+#include <syslink/notify.h>
+#include <syslink/notify_shmdriver.h>
+#include <syslink/notifydefs.h>
+#include <syslink/multiproc.h>
+
+#define NOTIFY_BASE_CMD (0x100)
+
+/*
+ * Command for Notify_getConfig
+ */
+#define CMD_NOTIFY_GETCONFIG (NOTIFY_BASE_CMD + 1u)
+
+/*
+ * Command for Notify_setup
+ */
+#define CMD_NOTIFY_SETUP (NOTIFY_BASE_CMD + 2u)
+
+/*
+ * Command for Notify_destroy
+ */
+#define CMD_NOTIFY_DESTROY (NOTIFY_BASE_CMD + 3u)
+
+/*
+ * Command for Notify_registerEvent
+ */
+#define CMD_NOTIFY_REGISTEREVENT (NOTIFY_BASE_CMD + 4u)
+
+/*
+ * Command for Notify_unregisterEvent
+ */
+#define CMD_NOTIFY_UNREGISTEREVENT (NOTIFY_BASE_CMD + 5u)
+
+/*
+ * Command for Notify_sendEvent
+ */
+#define CMD_NOTIFY_SENDEVENT (NOTIFY_BASE_CMD + 6u)
+
+/*
+ * Command for Notify_disable
+ */
+#define CMD_NOTIFY_DISABLE (NOTIFY_BASE_CMD + 7u)
+
+/*
+ * Command for Notify_restore
+ */
+#define CMD_NOTIFY_RESTORE (NOTIFY_BASE_CMD + 8u)
+
+/*
+ * Command for Notify_disableEvent
+ */
+#define CMD_NOTIFY_DISABLEEVENT (NOTIFY_BASE_CMD + 9u)
+
+/*
+ * Command for Notify_enableEvent
+ */
+#define CMD_NOTIFY_ENABLEEVENT (NOTIFY_BASE_CMD + 10u)
+
+/*!
+ * @brief Command for Notify_attach
+ */
+#define CMD_NOTIFY_ATTACH (NOTIFY_BASE_CMD + 11u)
+
+/*!
+ * @brief Command for Notify_detach
+ */
+#define CMD_NOTIFY_DETACH (NOTIFY_BASE_CMD + 12u)
+
+/*
+ * const NOTIFY_SYSTEM_KEY_MASK
+ *
+ * desc Mask to check for system key.
+ *
+ */
+
+#define NOTIFY_SYSTEM_KEY_MASK ((u32) 0xFFFF0000)
+
+/*
+ * const NOTIFY_EVENT_MASK
+ *
+ * desc Mask to check for event ID.
+ *
+ */
+
+#define NOTIFY_EVENT_MASK ((u32) 0x0000FFFF)
+
+struct notify_cmd_args {
+ int apiStatus;
+ /* Status of the API being called. */
+};
+
+/*
+ * Command arguments for Notify_getConfig
+ */
+struct notify_cmd_args_get_config {
+ struct notify_cmd_args commonArgs;
+ struct notify_config *cfg;
+};
+
+/*
+ * Command arguments for Notify_setup
+ */
+struct notify_cmd_args_setup {
+ struct notify_cmd_args commonArgs;
+ struct notify_config *cfg;
+};
+
+/*
+ * Command arguments for Notify_destroy
+ */
+struct notify_cmd_args_destroy {
+ struct notify_cmd_args commonArgs;
+};
+
+/*
+ * Command arguments for Notify_registerEvent
+ */
+struct notify_cmd_args_register_event {
+ struct notify_cmd_args commonArgs;
+ struct notify_driver_object *handle;
+ u16 procId;
+ u32 eventNo;
+ notify_callback_fxn fnNotifyCbck;
+ void *cbckArg;
+ u32 pid;
+};
+
+/*
+ * Command arguments for Notify_unregisterEvent
+ */
+struct notify_cmd_args_unregister_event {
+ struct notify_cmd_args commonArgs;
+ struct notify_driver_object *handle;
+ u16 procId;
+ u32 eventNo;
+ notify_callback_fxn fnNotifyCbck;
+ void *cbckArg;
+ u32 pid;
+};
+
+/*
+ * Command arguments for Notify_sendEvent
+ */
+struct notify_cmd_args_send_event {
+ struct notify_cmd_args commonArgs;
+ struct notify_driver_object *handle;
+ u16 procId;
+ u32 eventNo;
+ u32 payload;
+ bool waitClear;
+};
+
+/*
+ * Command arguments for Notify_disable
+ */
+struct notify_cmd_args_disable {
+ struct notify_cmd_args commonArgs;
+ u16 procId;
+ u32 flags;
+};
+
+/*
+ * Command arguments for Notify_restore
+ */
+struct notify_cmd_args_restore {
+ struct notify_cmd_args commonArgs;
+ u32 key;
+ u16 procId;
+};
+
+/*
+ * Command arguments for Notify_disableEvent
+ */
+struct notify_cmd_args_disable_event {
+ struct notify_cmd_args commonArgs;
+ struct notify_driver_object *handle;
+ u16 procId;
+ u32 eventNo;
+};
+
+/*
+ * Command arguments for Notify_enableEvent
+ */
+struct notify_cmd_args_enable_event {
+ struct notify_cmd_args commonArgs;
+ void *notify_driver_handle;
+ u16 procId;
+ u32 eventNo;
+};
+
+/*
+ * Command arguments for Notify_exit
+ */
+struct notify_cmd_args_exit {
+ struct notify_cmd_args commonArgs;
+};
+
+
+enum {
+ NOTIFY_DRIVERINITSTATUS_NOTDONE = 0,
+ /* Driver initialization is not done. */
+ NOTIFY_DRIVERINITSTATUS_DONE = 1,
+ /* Driver initialization is complete. */
+ NOTIFY_DRIVERINITSTATUS_INPROGRESS = 2,
+ /* Driver initialization is in progress. */
+ NOTIFY_DRIVERINITSTATUS_ENDVALUE = 3
+ /* End delimiter indicating start of invalid values for this enum */
+};
+
+/*
+ * This structure defines information for all processors supported by
+ * the Notify driver.
+ * An instance of this object is provided for each processor handled by
+ * the Notify driver, when registering itself with the Notify module.
+ *
+ */
+struct notify_driver_proc_info {
+ u32 max_events;
+ u32 reserved_events;
+ bool event_priority;
+ u32 payload_size;
+ u16 proc_id;
+};
+
+/*
+ * This structure defines the structure for specifying Notify driver
+ * attributes to the Notify module.
+ * This structure provides information about the Notify driver to the
+ * Notify module. The information is used by the Notify module mainly
+ * for parameter validation. It may also be used by the Notify module
+ * to take appropriate action if required, based on the characteristics
+ * of the Notify driver.
+ */
+struct notify_driver_attrs {
+ u32 numProc;
+ struct notify_driver_proc_info
+ proc_info[MULTIPROC_MAXPROCESSORS];
+};
+
+
+/* ========================================
+ * Function pointer types
+ * ========================================
+ */
+/*
+ * This type defines the function to register a callback for an event
+ * with the Notify driver.
+ * This function gets called internally from the Notify_registerEvent
+ * API. The Notify_registerEvent () function passes on the
+ * request into the Notify driver identified by the Notify Handle.
+ *
+ */
+typedef int(*NotifyDriver_RegisterEvent)(struct notify_driver_object *handle,
+ u16 procId, u32 eventNo, notify_callback_fxn cbckFxn,
+ void *cbckArg);
+/*
+ * This type defines the function to unregister a callback for an event
+ * with the Notify driver.
+ * This function gets called internally from the Notify_unregisterEvent
+ * API. The Notify_unregisterEvent () function passes on the
+ * request into the Notify driver identified by the Notify Handle.
+ *
+ */
+typedef int(*NotifyDriver_UnregisterEvent) (struct notify_driver_object *handle,
+ u16 procId, u32 eventNo, notify_callback_fxn cbckFxn,
+ void *cbckArg);
+
+/*
+ * This type defines the function to send a notification event to the
+ * registered users for this notification on the specified processor.
+ * This function gets called internally from the Notify_sendEvent ()
+ * API. The Notify_sendEvent () function passes on the request into
+ * the Notify driver identified by the Notify Handle.
+ */
+typedef int(*NotifyDriver_SendEvent) (struct notify_driver_object *handle,
+ u16 procId, u32 eventNo, u32 payload, bool waitClear);
+
+/*
+ * This type defines the function to disable all events for the
+ * specified processor ID.
+ * This function gets called internally from the Notify_disable ()
+ * API. The Notify_disable () function passes on the request into the
+ * Notify driver identified by the Notify Handle.
+ */
+typedef u32(*NotifyDriver_Disable) (struct notify_driver_object *handle,
+ u16 procId);
+
+/*
+ * This type defines the function to restore all events for the
+ * specified processor ID.
+ * This function gets called internally from the Notify_restore ()
+ * API. The Notify_restore () function passes on the request into the
+ * Notify driver identified by the Notify Handle.
+ */
+typedef void (*NotifyDriver_Restore) (struct notify_driver_object *handle,
+ u32 key, u16 procId);
+
+/*
+ * This type defines the function to disable specified event for the
+ * specified processor ID.
+ * This function gets called internally from the Notify_disableEvent ()
+ * API. The Notify_disableEvent () function passes on the request into
+ * the Notify driver identified by the Notify Handle.
+ */
+typedef void (*NotifyDriver_DisableEvent) (struct notify_driver_object *handle,
+ u16 procId, u32 eventNo);
+
+/*
+ * This type defines the function to enable specified event for the
+ * specified processor ID.
+ * This function gets called internally from the Notify_enableEvent ()
+ * API. The Notify_enableEvent () function passes on the request into
+ * the Notify driver identified by the Notify Handle.
+ *
+ */
+typedef void (*NotifyDriver_EnableEvent) (struct notify_driver_object *handle,
+ u16 procId, u32 eventNo);
+
+
+/*
+ * This structure defines the function table interface for the Notify
+ * driver.
+ * This function table interface must be implemented by each Notify
+ * driver and registered with the Notify module.
+ *
+ */
+struct notify_interface {
+ NotifyDriver_RegisterEvent register_event;
+ /* interface function registerEvent */
+ NotifyDriver_UnregisterEvent unregister_event;
+ /* interface function unregisterEvent */
+ NotifyDriver_SendEvent send_event;
+ /* interface function sendEvent */
+ NotifyDriver_Disable disable;
+ /* interface function disable */
+ NotifyDriver_Restore restore;
+ /* interface function restore */
+ NotifyDriver_DisableEvent disable_event;
+ /* interface function disableEvent */
+ NotifyDriver_EnableEvent enable_event;
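+ /* interface function enableEvent */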
+};
+
+
+union notify_drv_procevents{
+ struct {
+ struct notify_shmdrv_attrs attrs;
+ struct notify_shmdrv_ctrl *ctrl_ptr;
+ } shm_events;
+
+ struct {
+ /*Attributes */
+ unsigned long int num_events;
+ unsigned long int send_event_pollcount;
+ /* Control Parameters */
+ unsigned long int send_init_status ;
+ struct notify_shmdrv_eventreg_mask reg_mask ;
+ } non_shm_events;
+};
+
+
+/*
+ * This structure defines the Notify driver object and handle used
+ * internally to contain all information required for the Notify driver
+ * This object contains all information for the Notify module to be
+ * able to identify and interact with the Notify driver.
+ */
+struct notify_driver_object {
+ int is_init;
+ struct notify_interface fn_table;
+ char name[NOTIFY_MAX_NAMELEN];
+ struct notify_driver_attrs attrs;
+ u32 *disable_flag[NOTIFY_MAXNESTDEPTH];
+ void *driver_object;
+};
+
+
+struct notify_drv_eventlist {
+ unsigned long int event_handler_count;
+ struct list_head listeners;
+};
+
+
+
+struct notify_drv_eventlistner{
+ struct list_head element;
+ fn_notify_cbck fn_notify_cbck;
+ void *cbck_arg;
+};
+
+
+struct notify_drv_proc_module {
+
+ unsigned long int proc_id;
+ struct notify_drv_eventlist *event_list;
+ struct notify_shmdrv_eventreg *reg_chart;
+ union notify_drv_procevents events_obj;
+};
+
+/*
+ * Defines the Notify state object, which contains all the module
+ * specific information.
+ */
+struct notify_module_object {
+ atomic_t ref_count;
+ struct notify_config cfg;
+ /* Notify configuration structure */
+ struct notify_config def_cfg;
+ /* Default module configuration */
+ struct mutex *gate_handle;
+ /* Handle of gate to be used for local thread safety */
+ struct notify_driver_object drivers[NOTIFY_MAX_DRIVERS];
+ /* Array of configured drivers. */
+ u32 disable_depth;
+ /* Current disable depth for Notify module. */
+};
+#endif /* !defined (NOTIFYDRIVERDEFS_H) */
+
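
As a usage sketch, a concrete Notify driver registers itself by filling in the
notify_interface table inside its notify_driver_object before marking it
initialized. The my_drv_* callbacks below are hypothetical and only illustrate
the expected signatures; a real driver supplies implementations for every entry.

    #include <linux/types.h>
    #include <syslink/notifyerr.h>
    #include <syslink/notify_driverdefs.h>

    /* Hypothetical callbacks matching the function pointer types above. */
    static int my_drv_register(struct notify_driver_object *handle, u16 proc_id,
            u32 event_no, notify_callback_fxn fxn, void *arg)
    {
        return NOTIFY_SOK;      /* a real driver records the listener here */
    }

    static int my_drv_send(struct notify_driver_object *handle, u16 proc_id,
            u32 event_no, u32 payload, bool wait_clear)
    {
        return NOTIFY_SOK;      /* a real driver signals the remote side here */
    }

    static void my_drv_fill_fn_table(struct notify_driver_object *drv)
    {
        drv->fn_table.register_event = my_drv_register;
        drv->fn_table.send_event = my_drv_send;
        /* unregister_event, disable, restore, disable_event and
         * enable_event are assigned in the same way. */
        drv->is_init = 1;
    }
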
diff --git a/arch/arm/plat-omap/include/syslink/notify_ducatidriver.h b/arch/arm/plat-omap/include/syslink/notify_ducatidriver.h
new file mode 100644
index 000000000000..16ad80e425a7
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/notify_ducatidriver.h
@@ -0,0 +1,200 @@
+/*
+ * notify_ducatidriver.h
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef NOTIFY_DUCATIDRIVER_H_
+#define NOTIFY_DUCATIDRIVER_H_
+
+
+
+/* Notify*/
+#include <syslink/GlobalTypes.h>
+#include <syslink/notifyerr.h>
+#include <syslink/notify_driverdefs.h>
+
+/*
+ * const NOTIFYDUCATI_DRIVERNAME
+ *
+ * desc Name of the ducati driver.
+ *
+ */
+
+#define IPC_BUF_ALIGN 128
+#define IPC_ALIGN(x, y) (unsigned long int)\
+((unsigned long int)(((x) + (y) - 1) / (y)) * (y))
+
+
+#define NOTIFYDUCATI_DRIVERNAME "NOTIFY_DUCATIDRV"
+
+#define REG volatile
+
+
+extern u32 get_ducati_virt_mem(void);
+extern void unmap_ducati_virt_mem(u32 shm_virt_addr);
+
+/*
+* func notify_ducatidrv_register_event
+*
+* desc Register a callback for an event with the Notify driver.
+*
+*
+*/
+
+int notify_ducatidrv_register_event(
+ struct notify_driver_object *handle,
+ short int proc_id,
+ int event_no,
+ fn_notify_cbck fn_notify_cbck,
+ void *cbck_arg) ;
+
+/*
+* func notify_ducatidrv_unregister_event
+*
+* desc Unregister a callback for an event with the Notify driver.
+*
+*
+*/
+
+int notify_ducatidrv_unregister_event(
+ struct notify_driver_object *handle,
+ short int proc_id,
+ int event_no,
+ fn_notify_cbck fn_notify_cbck,
+ void *cbck_arg) ;
+
+/*
+* func notify_ducatidrv_sendevent
+*
+* desc Send a notification event to the registered users for this
+* notification on the specified processor.
+*
+*
+*/
+
+int notify_ducatidrv_sendevent(struct notify_driver_object *handle,
+ short int proc_id,
+ int event_no,
+ int payload,
+ short int wait_clear) ;
+
+/*
+* func notify_ducatidrv_disable
+*
+* desc Disable all events for this Notify driver.
+*
+*
+*/
+
+void *notify_ducatidrv_disable(struct notify_driver_object *handle);
+
+/*
+* func notify_ducatidrv_restore
+*
+* desc Restore the Notify driver to the state before the last disable was
+* called.
+*
+*
+*/
+
+int notify_ducatidrv_restore(struct notify_driver_object *handle,
+ void *flags) ;
+
+/*
+* func notify_ducatidrv_disable_event
+*
+* desc Disable a specific event for this Notify driver.
+*
+*
+*/
+
+int notify_ducatidrv_disable_event(
+ struct notify_driver_object *handle,
+ short int proc_id,
+ int event_no) ;
+
+/*
+* func notify_ducatidrv_enable_event
+*
+* desc Enable a specific event for this Notify driver.
+*
+*
+*/
+
+int notify_ducatidrv_enable_event(
+ struct notify_driver_object *handle,
+ short int proc_id,
+ int event_no) ;
+
+
+/*
+* func notify_ducatidrv_debug
+*
+* desc Print debug information for the Notify driver.
+*
+*
+*/
+
+int notify_ducatidrv_debug(struct notify_driver_object *handle) ;
+
+struct notify_ducatidrv_params {
+ int shared_addr;
+ int shared_addr_size;
+ int num_events;
+ int recv_int_id;
+ int send_int_id;
+ int remote_proc_id;
+ int num_reserved_events;
+ int send_event_poll_count;
+} ;
+
+/*
+ * struct notify_ducatidrv_config
+ *
+ */
+
+struct notify_ducatidrv_config {
+ u32 reserved;
+};
+
+/* Function to get the default configuration for the Notify module. */
+void notify_ducatidrv_getconfig(struct notify_ducatidrv_config *cfg);
+
+/* Function to setup the notify ducati driver with the given configuration*/
+int notify_ducatidrv_setup(struct notify_ducatidrv_config *cfg);
+
+/* Function to destroy the notify ducati driver*/
+int notify_ducatidrv_destroy(void);
+
+/* Function to create the ducati driver handle and perform initialization. */
+
+struct notify_driver_object *notify_ducatidrv_create(char *driver_name,
+ const struct notify_ducatidrv_params *params);
+
+/* Function to delete the ducati driver handle and perform de-initialization. */
+int notify_ducatidrv_delete(struct notify_driver_object **handle_ptr);
+
+/*Function to open the ducati driver */
+int notify_ducatidrv_open(char *driver_name,
+ struct notify_driver_object **handle_ptr);
+
+/*Function to close the ducati driver */
+int notify_ducatidrv_close(struct notify_driver_object **handle_ptr);
+
+/*Function to initialize the given parameters */
+void notify_ducatidrv_params_init(struct notify_driver_object *handle,
+ struct notify_ducatidrv_params *params);
+
+#endif /* NOTIFY_DUCATIDRIVER_H_ */
+
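
A rough bring-up sketch for this interface, shown only to illustrate the call
order; the field values are placeholders rather than the ones used by the board
code. Note that IPC_ALIGN rounds its first argument up to the next multiple of
the second, for example IPC_ALIGN(100, IPC_BUF_ALIGN) = ((100 + 127) / 128) * 128 = 128.

    #include <syslink/notify_ducatidriver.h>

    static struct notify_driver_object *example_ducati_bringup(u32 shm_va)
    {
        struct notify_ducatidrv_config cfg;
        struct notify_ducatidrv_params params = {0};

        notify_ducatidrv_getconfig(&cfg);
        if (notify_ducatidrv_setup(&cfg) != 0)
            return NULL;

        /* align the shared area on the 128-byte IPC boundary */
        params.shared_addr = (int) IPC_ALIGN(shm_va, IPC_BUF_ALIGN);
        params.shared_addr_size = 0x1000;   /* placeholder size */
        params.num_events = 32;             /* placeholder event count */
        params.remote_proc_id = 2;          /* placeholder processor id */

        return notify_ducatidrv_create(NOTIFYDUCATI_DRIVERNAME, &params);
    }
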
diff --git a/arch/arm/plat-omap/include/syslink/notify_ducatidriver_defs.h b/arch/arm/plat-omap/include/syslink/notify_ducatidriver_defs.h
new file mode 100644
index 000000000000..6d7b508ae533
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/notify_ducatidriver_defs.h
@@ -0,0 +1,152 @@
+/*
+ * notify_ducatidriver_defs.h
+ *
+ * Notify driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#ifndef NOTIFY_DUCATIDRV_DEFS
+#define NOTIFY_DUCATIDRV_DEFS
+
+#include <syslink/notify_ducatidriver.h>
+
+/*
+ * brief Base structure for NotifyDriverShm command args. This needs to be
+ * the first field in all command args structures.
+ */
+struct notify_ducatidrv_cmdargs {
+ int api_status;
+};
+
+
+/*
+ * ioctl command IDs for NotifyDriverShm
+ *
+ */
+/*
+ * brief Base command ID for NotifyDriverShm
+ */
+#define NOTIFYDRIVERSHM_BASE_CMD 0x100
+
+/*
+ * brief Command for NotifyDriverShm_getConfig
+ */
+#define CMD_NOTIFYDRIVERSHM_GETCONFIG (NOTIFYDRIVERSHM_BASE_CMD + 1u)
+
+/*
+ * brief Command for NotifyDriverShm_setup
+ */
+#define CMD_NOTIFYDRIVERSHM_SETUP (NOTIFYDRIVERSHM_BASE_CMD + 2u)
+
+/*
+ * brief Command for NotifyDriverShm_destroy
+ */
+#define CMD_NOTIFYDRIVERSHM_DESTROY (NOTIFYDRIVERSHM_BASE_CMD + 3u)
+
+/*
+ * brief Command for NotifyDriverShm_Params_init
+ */
+#define CMD_NOTIFYDRIVERSHM_PARAMS_INIT (NOTIFYDRIVERSHM_BASE_CMD + 4u)
+
+/*
+ * brief Command for NotifyDriverShm_create
+ */
+#define CMD_NOTIFYDRIVERSHM_CREATE (NOTIFYDRIVERSHM_BASE_CMD + 5u)
+
+/*
+ * brief Command for NotifyDriverShm_delete
+ */
+#define CMD_NOTIFYDRIVERSHM_DELETE (NOTIFYDRIVERSHM_BASE_CMD + 6u)
+
+/*
+ * brief Command for NotifyDriverShm_open
+ */
+#define CMD_NOTIFYDRIVERSHM_OPEN (NOTIFYDRIVERSHM_BASE_CMD + 7u)
+
+/*
+ * brief Command for NotifyDriverShm_close
+ */
+#define CMD_NOTIFYDRIVERSHM_CLOSE (NOTIFYDRIVERSHM_BASE_CMD + 8u)
+
+
+/*
+ * @brief Command arguments for NotifyDriverShm_getConfig
+ */
+struct notify_ducatidrv_cmdargs_getconfig {
+ struct notify_ducatidrv_cmdargs common_args;
+ struct notify_ducatidrv_config *cfg;
+};
+
+/*
+ * brief Command arguments for NotifyDriverShm_setup
+ */
+struct notify_ducatidrv_cmdargs_setup {
+ struct notify_ducatidrv_cmdargs common_args;
+ struct notify_ducatidrv_config *cfg;
+};
+
+/*
+ * brief Command arguments for NotifyDriverShm_destroy
+ */
+struct notify_ducatidrv_cmdargs_destroy {
+ struct notify_ducatidrv_cmdargs common_args;
+} ;
+
+/*
+ * brief Command arguments for NotifyDriverShm_Params_init
+ */
+
+struct notify_ducatidrv_cmdargs_paramsinit {
+ struct notify_ducatidrv_cmdargs common_args;
+ struct notify_driver_object *handle;
+ struct notify_ducatidrv_params *params;
+};
+
+/*!
+ * @brief Command arguments for NotifyDriverShm_create
+ */
+struct notify_ducatidrv_cmdargs_create {
+ struct notify_ducatidrv_cmdargs common_args;
+ char driverName[NOTIFY_MAX_NAMELEN];
+ struct notify_ducatidrv_params params;
+ struct notify_driver_object *handle;
+};
+
+/*
+ * brief Command arguments for NotifyDriverShm_delete
+ */
+struct notify_ducatidrv_cmdargs_delete {
+ struct notify_ducatidrv_cmdargs common_args;
+ struct notify_driver_object *handle;
+};
+
+/*
+ * brief Command arguments for NotifyDriverShm_open
+ */
+struct notify_ducatidrv_cmdargs_open {
+ struct notify_ducatidrv_cmdargs common_args;
+ char driverName[NOTIFY_MAX_NAMELEN];
+ struct notify_driver_object *handle;
+
+};
+
+/*
+ * brief Command arguments for NotifyDriverShm_close
+ */
+struct notify_ducatidrv_cmdargs_close {
+ struct notify_ducatidrv_cmdargs common_args;
+ struct notify_driver_object *handle;
+
+};
+
+#endif /*NOTIFY_DUCATIDRV_DEFS*/
diff --git a/arch/arm/plat-omap/include/syslink/notify_shmdriver.h b/arch/arm/plat-omap/include/syslink/notify_shmdriver.h
new file mode 100644
index 000000000000..450896160bdf
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/notify_shmdriver.h
@@ -0,0 +1,108 @@
+
+/*
+ * notify_shmdriver.h
+ *
+ * Notify driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#if !defined NOTIFY_SHMDRIVER_H_
+#define NOTIFY_SHMDRIVER_H_
+
+/*
+ * const NOTIFYSHMDRV_DRIVERNAME
+ *
+ * desc Name of the Notify Shared Memory Mailbox driver.
+ *
+ */
+#define NOTIFYSHMDRV_DRIVERNAME "NOTIFYSHMDRV"
+
+/*
+ * const NOTIFYSHMDRV_RESERVED_EVENTS
+ *
+ * desc Maximum number of events marked as reserved events by the
+ * NotifyShmDrv driver.
+ * If required, this value can be changed by the system integrator.
+ *
+ */
+
+#define NOTIFYSHMDRV_RESERVED_EVENTS 3
+
+
+/*
+ * name notify_shmdrv_attrs
+ *
+ */
+struct notify_shmdrv_attrs {
+ unsigned long int shm_base_addr;
+ unsigned long int shm_size;
+ unsigned long int num_events;
+ unsigned long int send_event_pollcount;
+};
+
+
+/*
+* name notify_shmdrv_event_entry
+*/
+struct notify_shmdrv_event_entry {
+ REG unsigned long int flag;
+ REG unsigned long int payload;
+ REG unsigned long int reserved;
+ unsigned long int padding[29];
+};
+
+/*
+* name notify_shmdrv_eventreg_mask
+*
+*/
+struct notify_shmdrv_eventreg_mask {
+ REG unsigned long int mask;
+ REG unsigned long int enable_mask;
+ unsigned long int padding[30];
+};
+
+/*
+* name notify_shmdrv_eventreg
+*
+*/
+struct notify_shmdrv_eventreg {
+ unsigned long int reg_event_no;
+ unsigned long int reserved;
+};
+
+/*
+* name notify_shmdrv_proc_ctrl
+*
+*/
+struct notify_shmdrv_proc_ctrl {
+ struct notify_shmdrv_event_entry *self_event_chart;
+ struct notify_shmdrv_event_entry *other_event_chart;
+ unsigned long int recv_init_status;
+ unsigned long int send_init_status;
+ unsigned long int padding[28];
+ struct notify_shmdrv_eventreg_mask reg_mask;
+ struct notify_shmdrv_eventreg *reg_chart;
+};
+
+/*
+ * brief Defines the notify_shmdrv_ctrl structure, which contains all
+ * information shared between the two connected processors.
+ * This structure itself resides in memory shared by both processors.
+ */
+struct notify_shmdrv_ctrl {
+ struct notify_shmdrv_proc_ctrl proc_ctrl[2];
+};
+
+#endif /* !defined NOTIFY_SHMDRIVER_H_ */
+
+
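
Conceptually, a sender posts an event by filling in one entry of an event chart
in this shared control block and then raising a mailbox interrupt toward the
other processor. The sketch below assumes the convention that a processor writes
outgoing events into the other_event_chart of its own proc_ctrl slot; the real
driver additionally handles locking, cache maintenance and the interrupt itself.

    #include <syslink/notify_ducatidriver.h>   /* provides the REG qualifier */
    #include <syslink/notify_shmdriver.h>

    static void example_post_event(struct notify_shmdrv_ctrl *ctrl,
            u32 self_idx, u32 event_no, u32 payload)
    {
        struct notify_shmdrv_event_entry *entry =
            &ctrl->proc_ctrl[self_idx].other_event_chart[event_no];

        entry->payload = payload;
        entry->flag = 1;    /* mark the event as pending for the remote side */
    }
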
diff --git a/arch/arm/plat-omap/include/syslink/notifydefs.h b/arch/arm/plat-omap/include/syslink/notifydefs.h
new file mode 100644
index 000000000000..7f37346a7f75
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/notifydefs.h
@@ -0,0 +1,25 @@
+/*
+ * notifydefs.h
+ *
+ * Notify driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#if !defined NOTIFYDEFS_H
+#define NOTIFYDEFS_H
+typedef void (*fn_notify_cbck) (unsigned long int procId,
+ u32 eventNo,
+ void *arg,
+ u32 payload) ;
+
+#endif /* !defined (NOTIFYDEFS_H) */
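
For reference, a handler matching fn_notify_cbck looks like the sketch below; it
is illustrative only, and a real handler may be called in a restricted context
and should keep its work minimal.

    #include <linux/kernel.h>
    #include <syslink/notifydefs.h>

    static void example_notify_cbck(unsigned long int proc_id, u32 event_no,
            void *arg, u32 payload)
    {
        pr_info("notify: event %u from proc %lu, payload 0x%x\n",
            event_no, proc_id, payload);
    }
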
diff --git a/arch/arm/plat-omap/include/syslink/notifyerr.h b/arch/arm/plat-omap/include/syslink/notifyerr.h
new file mode 100644
index 000000000000..9bbaa238fa3a
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/notifyerr.h
@@ -0,0 +1,198 @@
+/*
+ * notifyerr.h
+ *
+ * Notify driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#if !defined NOTIFYERR_H
+#define NOTIFYERR_H
+
+
+/*
+ * name NOTIFY_SUCCEEDED
+ *
+ * desc Check if the provided status code indicates a success code.
+ *
+ * arg status
+ * Status code to be checked
+ *
+ * ret TRUE
+ * If status code indicates success
+ * FALSE
+ * If status code indicates failure
+ *
+ * enter None.
+ *
+ * leave None.
+ *
+ * see NOTIFY_FAILED
+ *
+ */
+#define NOTIFY_SUCCEEDED(status)\
+(((signed long int) (status) >= (NOTIFY_SBASE)) \
+&& ((signed long int) (status) <= (NOTIFY_SLAST)))
+
+
+/*
+ * @name NOTIFY_FAILED
+ *
+ * @desc Check if the provided status code indicates a failure code.
+ *
+ * @arg status
+ * Status code to be checked
+ *
+ * @ret TRUE
+ * If status code indicates failure
+ * FALSE
+ * If status code indicates success
+ *
+ * @enter None.
+ *
+ * @leave None.
+ *
+ * @see NOTIFY_FAILED
+ *
+ */
+#define NOTIFY_FAILED(status) (!NOTIFY_SUCCEEDED(status))
+
+
+
+/*
+ * name NOTIFY_SBASE, NOTIFY_SLAST
+ *
+ * desc Defines the base and range for the success codes used by the
+ * Notify module
+ *
+ */
+#define NOTIFY_SBASE (signed long int)0x00002000l
+#define NOTIFY_SLAST (signed long int)0x000020FFl
+
+/*
+ * name NOTIFY_EBASE, NOTIFY_ELAST
+ *
+ * desc Defines the base and range for the failure codes used by the
+ * Notify module
+ *
+ */
+#define NOTIFY_EBASE (signed long int)0x80002000l
+#define NOTIFY_ELAST (signed long int)0x800020FFl
+
+
+/*
+ * SUCCESS Codes
+ *
+ */
+
+/* Generic success code for Notify module */
+#define NOTIFY_SOK (NOTIFY_SBASE + 0x01l)
+
+/* Indicates that the Notify module (or driver) has already been initialized
+ * by another client, and this process has now successfully acquired the right
+ * to use the Notify module.
+ */
+#define NOTIFY_SALREADYINIT (NOTIFY_SBASE + 0x02l)
+
+/* Indicates that the Notify module (or driver) is now being finalized, since
+ * the calling client is the last one finalizing the module, and all open
+ * handles to it have been closed.
+ */
+#define NOTIFY_SEXIT (NOTIFY_SBASE + 0x03l)
+
+
+/*
+ * FAILURE Codes
+ *
+ */
+
+/* Generic failure code for Notify module */
+#define NOTIFY_EFAIL (NOTIFY_EBASE + 0x01l)
+
+/* This failure code indicates that an operation has timed out. */
+#define NOTIFY_ETIMEOUT (NOTIFY_EBASE + 0x02l)
+
+/* This failure code indicates a configuration error */
+#define NOTIFY_ECONFIG (NOTIFY_EBASE + 0x03l)
+
+/* This failure code indicates that the Notify module has already been
+ * initialized from the calling client (process).
+ */
+#define NOTIFY_EALREADYINIT (NOTIFY_EBASE + 0x04l)
+
+/* This failure code indicates that the specified entity was not found.
+ * The interpretation of this error code depends on the function from which it
+ * was returned.
+ */
+#define NOTIFY_ENOTFOUND (NOTIFY_EBASE + 0x05l)
+
+/* This failure code indicates that the specified feature is not supported.
+ * The interpretation of this error code depends on the function from which it
+ * was returned.
+ */
+#define NOTIFY_ENOTSUPPORTED (NOTIFY_EBASE + 0x06l)
+
+/* This failure code indicates that the specified event number is
+ * not supported
+ */
+#define NOTIFY_EINVALIDEVENT (NOTIFY_EBASE + 0x07l)
+
+/* This failure code indicates that the specified pointer is invalid */
+#define NOTIFY_EPOINTER (NOTIFY_EBASE + 0x08l)
+
+/* This failure code indicates that a provided parameter was outside its valid
+ * range.
+ * The interpretation of this error code depends on the function from which it
+ * was returned.
+ */
+#define NOTIFY_ERANGE (NOTIFY_EBASE + 0x09l)
+
+/* This failure code indicates that the specified handle is invalid */
+#define NOTIFY_EHANDLE (NOTIFY_EBASE + 0x0Al)
+
+/* This failure code indicates that an invalid argument was specified */
+#define NOTIFY_EINVALIDARG (NOTIFY_EBASE + 0x0Bl)
+
+/* This failure code indicates a memory related failure */
+#define NOTIFY_EMEMORY (NOTIFY_EBASE + 0x0Cl)
+
+/* This failure code indicates that the Notify module has not been initialized*/
+#define NOTIFY_EINIT (NOTIFY_EBASE + 0x0Dl)
+
+/* This failure code indicates that a resource was not available.
+ * The interpretation of this error code depends on the function from which it
+ * was returned.
+ */
+#define NOTIFY_ERESOURCE (NOTIFY_EBASE + 0x0El)
+
+/* This failure code indicates that there was an attempt to register for a
+ * reserved event.
+ */
+#define NOTIFY_ERESERVEDEVENT (NOTIFY_EBASE + 0x0Fl)
+
+/* This failure code indicates that the specified entity already exists.
+ * The interpretation of this error code depends on the function from which it
+ * was returned.
+ */
+#define NOTIFY_EALREADYEXISTS (NOTIFY_EBASE + 0x10l)
+
+/* This failure code indicates that the Notify driver has not been fully
+ * initialized
+ */
+#define NOTIFY_EDRIVERINIT (NOTIFY_EBASE + 0x11l)
+
+/* This failure code indicates that the other side is not ready to receive
+ * notifications.
+ */
+#define NOTIFY_ENOTREADY (NOTIFY_EBASE + 0x12l)
+
+#endif /* !defined (NOTIFYERR_H) */
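
Success codes fall in the range 0x00002000 to 0x000020FF and failure codes in
0x80002000 to 0x800020FF, so callers are expected to test return values with the
range macros rather than comparing against NOTIFY_SOK alone. A minimal sketch:

    #include <linux/kernel.h>
    #include <syslink/notifyerr.h>

    static void example_check_status(int status)
    {
        if (NOTIFY_SUCCEEDED(status))
            return;     /* covers NOTIFY_SOK, NOTIFY_SALREADYINIT, ... */

        if (status == NOTIFY_ENOTREADY)
            pr_info("notify: remote side not ready yet, retry later\n");
        else
            pr_err("notify: call failed with status 0x%x\n", status);
    }
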
diff --git a/arch/arm/plat-omap/include/syslink/platform.h b/arch/arm/plat-omap/include/syslink/platform.h
new file mode 100644
index 000000000000..298d20f7ab5d
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/platform.h
@@ -0,0 +1,45 @@
+/*
+ * platform.h
+ *
+ * Defines the platform functions to be used by SysMgr module.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _PLATFORM_H_
+#define _PLATFORM_H_
+
+/* Module headers */
+#include <sysmgr.h>
+
+/* =============================================================================
+ * APIs
+ * =============================================================================
+ */
+/* Function to setup the platform */
+s32 platform_setup(struct sysmgr_config *config);
+
+/* Function to destroy the platform */
+s32 platform_destroy(void);
+
+/* Function called when slave is loaded with executable */
+void platform_load_callback(void *arg);
+
+/* Function called when slave is in started state*/
+void platform_start_callback(void *arg);
+
+/* Function called when slave is stopped state */
+void platform_stop_callback(void *arg);
+
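+/* Function to override the default platform configuration */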
+s32 platform_override_config(struct sysmgr_config *config);
+
+#endif /* ifndef _PLATFORM_H_ */
diff --git a/arch/arm/plat-omap/include/syslink/platform_mem.h b/arch/arm/plat-omap/include/syslink/platform_mem.h
new file mode 100644
index 000000000000..874a1153fc21
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/platform_mem.h
@@ -0,0 +1,137 @@
+/*
+ * platform_mem.h
+ *
+ * Target memory management interface implementation.
+ *
+ * This abstracts the Memory management interface in the kernel
+ * code. Allocation, Freeing-up, copy and address translate are
+ * supported for the kernel memory management.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _PLATFORM_MEM_H_
+#define _PLATFORM_MEM_H_
+
+#include <linux/types.h>
+
+/*
+ * MEMORYOS_MODULEID
+ * Module ID for platform mem module
+ */
+#define PLATFORM_MEM_MODULEID (u16) 0x97D2
+
+/*
+ * Enumerates the types of caching for memory regions
+ */
+enum platform_mem_cache_flags {
+ PLATFORM_MEM_CACHE_FLAGS_DEFAULT = 0x00000000,
+ /* Default flags - Cached */
+ PLATFORM_MEM_CACHE_FLAGS_CACHED = 0x00010000,
+ /* Cached memory */
+ PLATFORM_MEM_CACHE_FLAGS_UNCACHED = 0x00020000,
+ /* Uncached memory */
+ PLATFORM_MEM_CACHE_FLAGS_END_VALUE = 0x00030000
+ /* End delimiter indicating start of invalid values for this enum */
+};
+
+/*
+ * Enumerates the types of memory allocation
+ */
+enum platform_mem_mtype_flags{
+ PLATFORM_MEM_MTYPE_FLAGS_DEFAULT = 0x00000000,
+ /* Default flags - virtually contiguous */
+ PLATFORM_MEM_MTYPE_FLAGS_PHYSICAL = 0x00000001,
+ /* Physically contiguous */
+ PLATFORM_MEM_MTYPE_FLAGS_DMA = 0x00000002,
+ /* Physically contiguous and DMA-able */
+ PLATFORM_MEM_MTYPE_FLAGS_END_VALUE = 0x00000003
+ /* End delimiter indicating start of invalid values for this enum */
+};
+
+/*
+ * Enumerates the types of translation
+ */
+enum memory_xlt_flags{
+ PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS = 0x00000000,
+ /* Virtual to physical */
+ PLATFORM_MEM_XLT_FLAGS_PHYS2VIRT = 0x00000001,
+ /* Physical to virtual */
+ PLATFORM_MEM_XLT_FLAGS_END_VALUE = 0x00000002
+ /* End delimiter indicating start of invalid values for this enum */
+};
+
+/*
+ * Structure containing information required for mapping a
+ * memory region.
+ */
+struct platform_mem_map_info {
+ u32 src;
+ /* Address to be mapped. */
+ u32 size;
+ /* Size of memory region to be mapped. */
+ u32 dst;
+ /* Mapped address. */
+ bool is_cached;
+ /* Whether the mapping is to a cached area or uncached area. */
+ void *drv_handle;
+ /* Handle to the driver that is implementing the mmap call. Ignored for
+ Kernel-side version. */
+};
+
+/*
+ * Structure containing information required for unmapping a
+ * memory region.
+ */
+struct platform_mem_unmap_info {
+ u32 addr;
+ /* Address to be unmapped.*/
+ u32 size;
+ /* Size of memory region to be unmapped.*/
+ bool is_cached;
+ /* Whether the mapping is to a cached area or uncached area. */
+};
+
+/*
+ * Structure containing information required for mapping a
+ * memory region.
+ */
+#define memory_map_info struct platform_mem_map_info
+
+/*
+ * Structure containing information required for unmapping a
+ * memory region.
+ */
+#define memory_unmap_info struct platform_mem_unmap_info
+
+
+/* =============================================================================
+ * APIs
+ * =============================================================================
+ */
+/* Initialize the platform mem module. */
+int platform_mem_setup(void);
+
+/* Finalize the platform mem module. */
+int platform_mem_destroy(void);
+
+/* Maps a memory area into virtual space. */
+int platform_mem_map(memory_map_info *map_info);
+
+/* Unmaps a memory area from virtual space. */
+int platform_mem_unmap(memory_unmap_info *unmap_info);
+
+/* Translate API */
+void *platform_mem_translate(void *srcAddr, enum memory_xlt_flags flags);
+
+#endif /* ifndef _PLATFORM_MEM_H_ */
+
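
A minimal sketch of the map/unmap flow; the physical address and size are
placeholders, and the zero-on-success return convention is an assumption taken
from the surrounding syslink code rather than something this header states.

    #include <syslink/platform_mem.h>

    static int example_map_unmap(void)
    {
        struct platform_mem_map_info mi;
        struct platform_mem_unmap_info ui;
        int status;

        mi.src = 0x9CF00000;    /* placeholder physical address */
        mi.size = 0x100000;     /* placeholder size: 1 MB */
        mi.is_cached = false;
        mi.drv_handle = NULL;   /* ignored for the kernel-side version */

        status = platform_mem_map(&mi);
        if (status != 0)
            return status;

        /* mi.dst now holds the kernel virtual address of the region */

        ui.addr = mi.dst;
        ui.size = mi.size;
        ui.is_cached = false;
        return platform_mem_unmap(&ui);
    }
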
diff --git a/arch/arm/plat-omap/include/syslink/procmgr.h b/arch/arm/plat-omap/include/syslink/procmgr.h
new file mode 100644
index 000000000000..4d113c9fa90d
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/procmgr.h
@@ -0,0 +1,280 @@
+/*
+ * procmgr.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#ifndef SYSLINK_PROC_MGR_H
+#define SYSLINK_PROC_MGR_H
+
+#include <linux/types.h>
+#include <syslink/multiproc.h>
+
+
+
+#define PROCMGR_MODULEID 0xf2ba
+
+/*
+ * Maximum name length for ProcMgr module strings.
+ */
+#define PROCMGR_MAX_STRLEN 32
+
+/*
+ * Maximum number of memory regions supported by ProcMgr module.
+ */
+#define PROCMGR_MAX_MEMORY_REGIONS 32
+
+/*
+ * IS_VALID_PROCID
+ * Checks if the Processor ID is valid
+ */
+#define IS_VALID_PROCID(id) ((id) < MULTIPROC_MAXPROCESSORS)
+
+
+/*
+ * Enumerations to indicate Processor states.
+ */
+enum proc_mgr_state {
+ PROC_MGR_STATE_UNKNOWN = 0,
+ /* Unknown Processor state (e.g. at startup or error). */
+ PROC_MGR_STATE_POWERED = 1,
+ /* Indicates the Processor is powered up. */
+ PROC_MGR_STATE_RESET = 2,
+ /* Indicates the Processor is reset. */
+ PROC_MGR_STATE_LOADED = 3,
+ /* Indicates the Processor is loaded. */
+ PROC_MGR_STATE_RUNNNING = 4,
+ /* Indicates the Processor is running. */
+ PROC_MGR_STATE_UNAVAILABLE = 5,
+ /* Indicates the Processor is unavailable to the physical transport. */
+ PROC_MGR_STATE_ENDVALUE = 6
+ /* End delimiter indicating start of invalid values for this enum */
+};
+
+/*
+ * Enumerations to indicate different types of slave boot modes.
+ */
+enum proc_mgr_boot_mode {
+ PROC_MGR_BOOTMODE_BOOT = 0,
+ /* ProcMgr is responsible for loading the slave and its reset control */
+ PROC_MGR_BOOTMODE_NOLOAD = 1,
+ /* ProcMgr is not responsible for loading the slave. It is responsible
+ for reset control of the slave. */
+ PROC_MGR_BOOTMODE_NOBOOT = 2,
+ /* ProcMgr is not responsible for loading or reset control of the slave.
+ The slave either self-boots, or this is done by some entity outside of
+ the ProcMgr module. */
+ PROC_MGR_BOOTMODE_ENDVALUE = 3
+ /* End delimiter indicating start of invalid values for this enum */
+} ;
+
+/*
+ * Enumerations to indicate address types used for translation
+ */
+enum proc_mgr_addr_type{
+ PROC_MGR_ADDRTYPE_MASTERKNLVIRT = 0,
+ /* Kernel Virtual address on master processor */
+ PROC_MGR_ADDRTYPE_MASTERUSRVIRT = 1,
+ /* User Virtual address on master processor */
+ PROC_MGR_ADDRTYPE_SLAVEVIRT = 2,
+ /* Virtual address on slave processor */
+ PROC_MGR_ADDRTYPE_ENDVALUE = 3
+ /* End delimiter indicating start of invalid values for this enum */
+};
+
+/*
+ * Enumerations to indicate types of address mapping
+ */
+enum proc_mgr_map_type {
+ PROC_MGR_MAPTYPE_VIRT = 0,
+ /* Map/unmap to virtual address space (kernel/user) */
+ PROC_MGR_MAPTYPE_SLAVE = 1,
+ /* Map/unmap to slave address space */
+ PROC_MGR_MAPTYPE_ENDVALUE = 2
+ /* End delimiter indicating start of invalid values for this enum */
+};
+
+/*
+ * Module configuration structure.
+ */
+struct proc_mgr_config {
+ void *gate_handle;
+} ;
+
+/*
+ * Configuration parameters specific to the slave ProcMgr instance.
+ */
+struct proc_mgr_params {
+ void *proc_handle;
+ /* Handle to the Processor object associated with this ProcMgr. */
+ void *loader_handle;
+ /*!< Handle to the Loader object associated with this ProcMgr. */
+ void *pwr_handle;
+ /*!< Handle to the PwrMgr object associated with this ProcMgr. */
+};
+
+/*
+ * Configuration parameters specific to the slave ProcMgr instance.
+ */
+struct proc_mgr_attach_params {
+ enum proc_mgr_boot_mode boot_mode;
+ /* Boot mode for the slave processor. */
+} ;
+
+/*
+ * Configuration parameters to be provided while starting the slave
+ * processor.
+ */
+struct proc_mgr_start_params {
+ u32 proc_id;
+};
+
+/*
+ * Configuration parameters to be provided while stopping the slave
+ * processor.
+ */
+struct proc_mgr_stop_params {
+ u32 proc_id;
+};
+
+/*
+ * This structure defines information about memory regions mapped by
+ * the ProcMgr module.
+ */
+struct proc_mgr_addr_info {
+/* bool is_init; */
+ unsigned short is_init;
+ /* Is this memory region initialized? */
+ u32 addr[PROC_MGR_ADDRTYPE_ENDVALUE];
+ /* Addresses for each type of address space */
+ u32 size;
+ /* Size of the memory region in bytes */
+};
+
+/*
+ * Characteristics of the slave processor
+ */
+struct proc_mgr_proc_info {
+ enum proc_mgr_boot_mode boot_mode;
+ /* Boot mode of the processor. */
+ u16 num_mem_entries;
+ /* Number of valid memory entries */
+ struct proc_mgr_addr_info mem_entries[PROCMGR_MAX_MEMORY_REGIONS];
+ /* Configuration of memory regions */
+};
+
+
+/*
+ * Function pointer type that is passed to the proc_mgr_register_notify function
+*/
+typedef int (*proc_mgr_callback_fxn)(u16 proc_id, void *handle,
+ enum proc_mgr_state from_state, enum proc_mgr_state to_state);
+
+/* Function to get the default configuration for the ProcMgr module. */
+void proc_mgr_get_config(struct proc_mgr_config *cfg);
+
+/* Function to setup the ProcMgr module. */
+int proc_mgr_setup(struct proc_mgr_config *cfg);
+
+/* Function to destroy the ProcMgr module. */
+int proc_mgr_destroy(void);
+
+/* Function to initialize the parameters for the ProcMgr instance. */
+void proc_mgr_params_init(void *handle, struct proc_mgr_params *params);
+
+/* Function to create a ProcMgr object for a specific slave processor. */
+void *proc_mgr_create(u16 proc_id, const struct proc_mgr_params *params);
+
+/* Function to delete a ProcMgr object for a specific slave processor. */
+int proc_mgr_delete(void **handle_ptr);
+
+/* Function to open a handle to an existing ProcMgr object handling the
+ * proc_id.
+ */
+int proc_mgr_open(void **handle, u16 proc_id);
+
+/* Function to close this handle to the ProcMgr instance. */
+int proc_mgr_close(void *handle);
+
+/* Function to initialize the parameters for the ProcMgr attach function. */
+void proc_mgr_get_attach_params(void *handle,
+ struct proc_mgr_attach_params *params);
+
+/* Function to attach the client to the specified slave and also initialize the
+ * slave (if required).
+ */
+int proc_mgr_attach(void *handle, struct proc_mgr_attach_params *params);
+
+/* Function to detach the client from the specified slave and also finalize the
+ * slave (if required).
+ */
+int proc_mgr_detach(void *handle);
+
+/* Function to initialize the parameters for the ProcMgr start function. */
+void proc_mgr_get_start_params(void *handle,
+ struct proc_mgr_start_params *params);
+
+/* Function to start execution of the loaded code on the slave from the
+ * starting point specified in the slave executable loaded earlier by a call to
+ * proc_mgr_load().
+ */
+int proc_mgr_start(void *handle, u32 entry_point,
+ struct proc_mgr_start_params *params);
+
+/* Function to stop execution of the slave Processor. */
+int proc_mgr_stop(void *handle, struct proc_mgr_stop_params *params);
+
+/* Function to get the current state of the slave Processor as maintained on
+ * the master Processor state machine.
+ */
+enum proc_mgr_state proc_mgr_get_state(void *handle);
+
+/* Function to read from the slave Processor's memory space. */
+int proc_mgr_read(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer);
+
+/* Function to write to the slave Processor's memory space. */
+int proc_mgr_write(void *handle, u32 proc_addr, u32 *num_bytes, void *buffer);
+
+/* Function that provides a hook for performing device dependent operations on
+ * the slave Processor.
+ */
+int proc_mgr_control(void *handle, int cmd, void *arg);
+
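+/* Function to translate an address between master and slave address spaces. */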
+int proc_mgr_translate_addr(void *handle, void **dst_addr,
+ enum proc_mgr_addr_type dst_addr_type, void *src_addr,
+ enum proc_mgr_addr_type src_addr_type);
+
+/* Function that maps the specified slave address to master address space. */
+int proc_mgr_map(void *handle, u32 proc_addr, u32 size,
+ u32 *mappedAddr, u32 *mapped_size, u32 map_attribs);
+
+/* Function that unmaps the specified slave address to master address space. */
+int proc_mgr_unmap(void *handle, u32 mapped_addr);
+
+/* Function that registers for notification when the slave processor
+ * transitions to any of the states specified.
+ */
+int proc_mgr_register_notify(void *handle, proc_mgr_callback_fxn fxn,
+ void *args, enum proc_mgr_state state[]);
+
+/* Function that returns information about the characteristics of the slave
+ * processor.
+ */
+int proc_mgr_get_proc_info(void *handle, struct proc_mgr_proc_info *proc_info);
+
+/* Function that returns virtual to physical translations
+ */
+int proc_mgr_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries);
+
+#endif
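
The declarations above imply a bring-up order of open, attach, then start. A
sketch of that sequence, with error handling reduced to the bare minimum and
proc_id and entry_point supplied by the caller:

    #include <syslink/procmgr.h>

    static void *example_proc_bringup(u16 proc_id, u32 entry_point)
    {
        struct proc_mgr_attach_params attach_params;
        struct proc_mgr_start_params start_params;
        void *handle = NULL;

        if (proc_mgr_open(&handle, proc_id) != 0)
            return NULL;

        proc_mgr_get_attach_params(handle, &attach_params);
        if (proc_mgr_attach(handle, &attach_params) != 0)
            goto err;

        proc_mgr_get_start_params(handle, &start_params);
        start_params.proc_id = proc_id;
        if (proc_mgr_start(handle, entry_point, &start_params) != 0)
            goto err;

        /* the slave should now report PROC_MGR_STATE_RUNNNING */
        return handle;

    err:
        proc_mgr_close(handle);
        return NULL;
    }
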
diff --git a/arch/arm/plat-omap/include/syslink/sharedregion.h b/arch/arm/plat-omap/include/syslink/sharedregion.h
new file mode 100644
index 000000000000..75fe48049b5e
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/sharedregion.h
@@ -0,0 +1,110 @@
+/*
+ * sharedregion.h
+ *
+ * The SharedRegion module is designed to be used in a
+ * multi-processor environment where there are memory regions
+ * that are shared and accessed across different processors
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _SHAREDREGION_H_
+#define _SHAREDREGION_H_
+
+#include <linux/types.h>
+
+/*
+ * SHAREDREGION_MODULEID
+ * Module ID for Shared region manager
+ */
+#define SHAREDREGION_MODULEID (0x5D8A)
+
+/*
+ * Name of the reserved NameServer instance used by the SharedRegion module
+ */
+#define SHAREDREGION_NAMESERVER "SHAREDREGION"
+
+/*
+ * Invalid shared region pointer value
+ */
+#define SHAREDREGION_INVALIDSRPTR ((u32 *)0xFFFFFFFF)
+
+
+struct sharedregion_info {
+ bool is_valid; /* table entry is valid or not? */
+ void *base; /* Ptr to the base address of a table entry */
+ u32 len; /* The length of a table entry */
+};
+
+/*
+ * Module configuration structure
+ */
+struct sharedregion_config {
+ void *gate_handle;
+ void *heap_handle;
+ u32 max_regions;
+};
+
+/*
+ * Function to get the configuration
+ */
+int sharedregion_get_config(struct sharedregion_config *config);
+
+/*
+ * Function to setup the SharedRegion module
+ */
+int sharedregion_setup(const struct sharedregion_config *config);
+
+/*
+ * Function to destroy the SharedRegion module
+ */
+int sharedregion_destroy(void);
+
+/* Function to add a memory segment to the lookup table at
+ * runtime, by base address and length
+ */
+int sharedregion_add(u32 index, void *base, u32 len);
+
+/* Removes the memory segment at the specified index from the lookup
+ * table at runtime
+ */
+int sharedregion_remove(u32 index);
+
+/*
+ * Returns the index for the specified address pointer
+ */
+int sharedregion_get_index(void *addr);
+
+/*
+ * Returns the address pointer associated with the shared region pointer
+ */
+void *sharedregion_get_ptr(u32 *srptr);
+
+/*
+ * Returns the shared region pointer
+ */
+u32 *sharedregion_get_srptr(void *addr, int index);
+
+/*
+ * Gets the table entry information for the specified index and processor id
+ */
+int sharedregion_get_table_info(u32 index, u16 proc_id,
+ struct sharedregion_info *info);
+
+/*
+ * Sets the base address of the entry in the table
+ */
+int sharedregion_set_table_info(u32 index, u16 proc_id,
+ struct sharedregion_info *info);
+
+#endif /* _SHAREDREGION_H_ */
+
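
A sketch of the intended use: publish a window of shared memory as a region,
then convert a local pointer into a portable SharedRegion pointer that the
remote side can convert back. The region index of 0 and the zero-on-success
checks are assumptions for illustration.

    #include <linux/kernel.h>
    #include <syslink/sharedregion.h>

    static void example_sharedregion(void *base, u32 len, void *buf)
    {
        u32 *srptr;
        int index;

        if (sharedregion_add(0, base, len) != 0)
            return;

        /* 'buf' is assumed to lie inside the region added above */
        index = sharedregion_get_index(buf);
        srptr = sharedregion_get_srptr(buf, index);
        if (srptr != SHAREDREGION_INVALIDSRPTR)
            pr_info("srptr %p maps back to %p\n",
                srptr, sharedregion_get_ptr(srptr));
    }
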
diff --git a/arch/arm/plat-omap/include/syslink/sharedregion_ioctl.h b/arch/arm/plat-omap/include/syslink/sharedregion_ioctl.h
new file mode 100644
index 000000000000..0b2857110991
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/sharedregion_ioctl.h
@@ -0,0 +1,181 @@
+/*
+ * sharedregion_ioctl.h
+ *
+ * The sharedregion module is designed to be used in a
+ * multi-processor environment where there are memory regions
+ * that are shared and accessed across different processors
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _SHAREDREGION_IOCTL_H
+#define _SHAREDREGION_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <ipc_ioctl.h>
+#include <sharedregion.h>
+
+enum CMD_SHAREDREGION {
+ SHAREDREGION_GETCONFIG = SHAREDREGION_BASE_CMD,
+ SHAREDREGION_SETUP,
+ SHAREDREGION_DESTROY,
+ SHAREDREGION_ADD,
+ SHAREDREGION_GETPTR,
+ SHAREDREGION_GETSRPTR,
+ SHAREDREGION_GETTABLEINFO,
+ SHAREDREGION_REMOVE,
+ SHAREDREGION_SETTABLEINFO,
+ SHAREDREGION_GETINDEX,
+};
+
+/*
+ * IOCTL command IDs for sharedregion
+ *
+ */
+
+/*
+ * Command for sharedregion_get_config
+ */
+#define CMD_SHAREDREGION_GETCONFIG _IOWR(IPC_IOC_MAGIC, \
+ SHAREDREGION_GETCONFIG, \
+ struct sharedregion_cmd_args)
+/*
+ * Command for sharedregion_setup
+ */
+#define CMD_SHAREDREGION_SETUP _IOWR(IPC_IOC_MAGIC, \
+ SHAREDREGION_SETUP, \
+ struct sharedregion_cmd_args)
+/*
+ * Command for sharedregion_destroy
+ */
+#define CMD_SHAREDREGION_DESTROY _IOWR(IPC_IOC_MAGIC, \
+ SHAREDREGION_DESTROY, \
+ struct sharedregion_cmd_args)
+/*
+ * Command for sharedregion_add
+ */
+#define CMD_SHAREDREGION_ADD _IOWR(IPC_IOC_MAGIC, \
+ SHAREDREGION_ADD, \
+ struct sharedregion_cmd_args)
+/*
+ * Command for sharedregion_get_ptr
+ */
+#define CMD_SHAREDREGION_GETPTR _IOWR(IPC_IOC_MAGIC, \
+ SHAREDREGION_GETPTR, \
+ struct sharedregion_cmd_args)
+
+/*
+ * Command for sharedregion_get_srptr
+ */
+#define CMD_SHAREDREGION_GETSRPTR _IOWR(IPC_IOC_MAGIC, \
+ SHAREDREGION_GETSRPTR, \
+ struct sharedregion_cmd_args)
+
+/*
+ * Command for sharedregion_get_table_info
+ */
+#define CMD_SHAREDREGION_GETTABLEINFO _IOWR(IPC_IOC_MAGIC, \
+ SHAREDREGION_GETTABLEINFO, \
+ struct sharedregion_cmd_args)
+
+/*
+ * Command for sharedregion_remove
+ */
+#define CMD_SHAREDREGION_REMOVE _IOWR(IPC_IOC_MAGIC, \
+ SHAREDREGION_REMOVE, \
+ struct sharedregion_cmd_args)
+/*
+ * Command for sharedregion_set_table_info
+ */
+#define CMD_SHAREDREGION_SETTABLEINFO _IOWR(IPC_IOC_MAGIC, \
+ SHAREDREGION_SETTABLEINFO, \
+ struct sharedregion_cmd_args)
+
+/*
+ * Command for sharedregion_get_index
+ */
+#define CMD_SHAREDREGION_GETINDEX _IOWR(IPC_IOC_MAGIC, \
+ SHAREDREGION_GETINDEX, \
+ struct sharedregion_cmd_args)
+
+/*
+ * Command arguments for sharedregion
+ */
+union sharedregion_arg {
+ struct {
+ struct sharedregion_config *config;
+ } get_config;
+
+ struct {
+ struct sharedregion_config *config;
+ struct sharedregion_config *default_cfg;
+ struct sharedregion_info *table;
+ } setup;
+
+ struct {
+ u32 index;
+ void *base;
+ u32 len;
+ } add;
+
+ struct {
+ void *addr;
+ s32 index;
+ } get_index;
+
+ struct {
+ u32 *srptr;
+ void *addr;
+ } get_ptr;
+
+ struct {
+ u32 *srptr;
+ void *addr;
+ s32 index;
+ } get_srptr;
+
+ struct {
+ u32 index;
+ u16 proc_id;
+ struct sharedregion_info *info;
+ } get_table_info;
+
+ struct {
+ u32 index;
+ } remove;
+
+ struct {
+ u32 index;
+ u16 proc_id;
+ struct sharedregion_info *info;
+ } set_table_info;
+} ;
+
+/*
+ * Command arguments for sharedregion
+ */
+struct sharedregion_cmd_args {
+ union sharedregion_arg args;
+ s32 api_status;
+};
+
+/*
+ * This ioctl interface for sharedregion module
+ */
+int sharedregion_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+#endif /* _SHAREDREGION_IOCTL_H */
+
+
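
These commands are meant to be issued from user space through the syslink IPC
device. The sketch below assumes a /dev/syslink_ipc device node and that the
structures above are visible to the application; neither is defined by this
header, so treat the example as an illustration of the calling convention only.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int example_get_sharedregion_config(void)
    {
        struct sharedregion_config cfg;
        struct sharedregion_cmd_args cmd_args;
        int fd, ret;

        fd = open("/dev/syslink_ipc", O_RDWR);  /* assumed device node */
        if (fd < 0)
            return -1;

        cmd_args.args.get_config.config = &cfg;
        ret = ioctl(fd, CMD_SHAREDREGION_GETCONFIG, &cmd_args);
        if (ret == 0)
            printf("max_regions = %u, api_status = %d\n",
                cfg.max_regions, cmd_args.api_status);

        close(fd);
        return ret;
    }
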
diff --git a/arch/arm/plat-omap/include/syslink/sysmemmgr.h b/arch/arm/plat-omap/include/syslink/sysmemmgr.h
new file mode 100644
index 000000000000..34c3b4182288
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/sysmemmgr.h
@@ -0,0 +1,179 @@
+/*
+ * sysmemmgr.h
+ *
+ * Manager for the Slave system memory. Slave system level memory is allocated
+ * through this module.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+#ifndef _SYSTEMMEMORYMANAGER_H_
+#define _SYSTEMMEMORYMANAGER_H_
+
+
+/*!
+ * @def SYSMEMMGR_MODULEID
+ * @brief Module identifier for System memory manager.
+ */
+#define SYSMEMMGR_MODULEID (0xb53d)
+
+/*!
+ * @def SYSMEMMGR_STATUSCODEBASE
+ * @brief Error code base for system memory manager module.
+ */
+#define SYSMEMMGR_STATUSCODEBASE (SYSMEMMGR_MODULEID << 12u)
+
+/*!
+ * @def SYSMEMMGR_MAKE_ERROR
+ * @brief Macro to make error code.
+ */
+#define SYSMEMMGR_MAKE_ERROR(x) ((int)(0x80000000 + \
+ (SYSMEMMGR_STATUSCODEBASE + \
+ (x))))
+
+/*!
+ * @def SYSMEMMGR_MAKE_SUCCESS
+ * @brief Macro to make success code.
+ */
+#define SYSMEMMGR_MAKE_SUCCESS(x) (SYSMEMMGR_STATUSCODEBASE + (x))
+
+/*!
+ * @def SYSMEMMGR_E_CREATEALLOCATOR
+ * @brief Static allocator creation failed.
+ */
+#define SYSMEMMGR_E_CREATEALLOCATOR SYSMEMMGR_MAKE_ERROR(1)
+
+/*!
+ * @def SYSMEMMGR_E_CREATELOCK
+ * @brief Mutex lock creation failed.
+ */
+#define SYSMEMMGR_E_CREATELOCK SYSMEMMGR_MAKE_ERROR(2)
+
+/*!
+ * @def SYSMEMMGR_E_INVALIDSTATE
+ * @brief Module is not initialized.
+ */
+#define SYSMEMMGR_E_INVALIDSTATE SYSMEMMGR_MAKE_ERROR(3)
+
+/*!
+ * @def SYSMEMMGR_E_INVALIDARG
+ * @brief Argument passed to a function is invalid.
+ */
+#define SYSMEMMGR_E_INVALIDARG SYSMEMMGR_MAKE_ERROR(4)
+
+/*!
+ * @def SYSMEMMGR_E_BPAFREE
+ * @brief Freeing to buddy allocator failed.
+ */
+#define SYSMEMMGR_E_BPAFREE SYSMEMMGR_MAKE_ERROR(5)
+
+/*!
+ * @def SYSMEMMGR_E_MEMORY
+ * @brief Memory allocation failed.
+ */
+#define SYSMEMMGR_E_MEMORY SYSMEMMGR_MAKE_ERROR(6)
+
+/*!
+ * @def SYSMEMMGR_SUCCESS
+ * @brief Operation successful.
+ */
+#define SYSMEMMGR_SUCCESS SYSMEMMGR_MAKE_SUCCESS(0)
+
+/*!
+ * @def SYSMEMMGR_S_ALREADYSETUP
+ * @brief Module already initialized.
+ */
+#define SYSMEMMGR_S_ALREADYSETUP SYSMEMMGR_MAKE_SUCCESS(1)
+
+/*!
+ * @def SYSMEMMGR_S_DRVALREADYOPENED
+ * @brief Internal OS Driver is already opened.
+ */
+#define SYSMEMMGR_S_DRVALREADYOPENED SYSMEMMGR_MAKE_SUCCESS(2)
+
+/*!
+ * @brief Configuration data structure of system memory manager.
+ */
+struct sysmemmgr_config {
+ u32 sizeof_valloc;
+ /* Total size for virtual memory allocation */
+ u32 sizeof_palloc;
+ /* Total size for physical memory allocation */
+ u32 static_phys_base_addr;
+ /* Physical address of static memory region */
+ u32 static_virt_base_addr;
+ /* Virtual address of static memory region */
+ u32 static_mem_size;
+ /* size of static memory region */
+ u32 page_size;
+ /* Page size */
+ u32 event_no;
+ /* Event number to be used */
+};
+
+/*!
+ * @brief Flag used for allocating memory blocks.
+ */
+enum sysmemmgr_allocflag {
+ sysmemmgr_allocflag_uncached = 0u,
+ /* Flag used for allocating uncacheable block */
+ sysmemmgr_allocflag_cached = 1u,
+ /* Flag used for allocating cacheable block */
+ sysmemmgr_allocflag_physical = 2u,
+ /* Flag used for allocating physically contiguous block */
+ sysmemmgr_allocflag_virtual = 3u,
+ /* Flag used for allocating virtually contiguous block */
+ sysmemmgr_allocflag_dma = 4u
+ /* Flag used for allocating DMAable (physically contiguous) block */
+};
+
+/*!
+ * @brief Flag used for translating address.
+ */
+enum sysmemmgr_xltflag {
+ sysmemmgr_xltflag_kvirt2phys = 0x0001u,
+ /* Flag used for converting Kernel virtual address to physical
+ * address */
+ sysmemmgr_xltflag_kvirt2uvirt = 0x0002u,
+ /* Flag used for converting Kernel virtual address to user virtual
+ * address */
+ sysmemmgr_xltflag_uvirt2phys = 0x0003u,
+ /* Flag used for converting user virtual address to physical address */
+ sysmemmgr_xltflag_uvirt2kvirt = 0x0004u,
+ /* Flag used for converting user virtual address to Kernel virtual
+ * address */
+ sysmemmgr_xltflag_phys2kvirt = 0x0005u,
+ /* Flag used for converting physical address to Kernel virtual
+ * address */
+ sysmemmgr_xltflag_phys2uvirt = 0x0006u
+ /* Flag used for converting physical address to user virtual
+ * address */
+};
+
+
+/* Function prototypes */
+void sysmemmgr_get_config(struct sysmemmgr_config *config);
+
+int sysmemmgr_setup(struct sysmemmgr_config *params);
+
+int sysmemmgr_destroy(void);
+
+int sysmemmgr_attach(u16 slave_id);
+
+void *sysmemmgr_alloc(u32 size, enum sysmemmgr_allocflag flag);
+
+int sysmemmgr_free(void *blk, u32 size, enum sysmemmgr_allocflag flag);
+
+void *sysmemmgr_translate(void *srcAddr, enum sysmemmgr_xltflag flag);
+
+
+#endif /* _SYSTEMMEMORYMANAGER_H_ */
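
With SYSMEMMGR_MODULEID equal to 0xb53d, SYSMEMMGR_STATUSCODEBASE works out to
0xb53d << 12 = 0x0b53d000, so SYSMEMMGR_SUCCESS is 0x0b53d000 and, for example,
SYSMEMMGR_E_MEMORY is 0x80000000 + 0x0b53d000 + 6 = 0x8b53d006. A minimal
allocation sketch, with the flag choice and size as placeholders:

    #include <syslink/sysmemmgr.h>

    static int example_sysmemmgr_alloc(u32 size)
    {
        void *blk;

        blk = sysmemmgr_alloc(size, sysmemmgr_allocflag_physical);
        if (blk == NULL)
            return SYSMEMMGR_E_MEMORY;

        /* ... use the physically contiguous block ... */

        return sysmemmgr_free(blk, size, sysmemmgr_allocflag_physical);
    }
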
diff --git a/arch/arm/plat-omap/include/syslink/sysmemmgr_ioctl.h b/arch/arm/plat-omap/include/syslink/sysmemmgr_ioctl.h
new file mode 100644
index 000000000000..4b0d99615560
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/sysmemmgr_ioctl.h
@@ -0,0 +1,130 @@
+/*
+ * sysmemmgr_ioctl.h
+ *
+ * Definitions of sysmemmgr driver types and structures.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _SYSMEMMGR_IOCTL_H_
+#define _SYSMEMMGR_IOCTL_H_
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Syslink headers */
+#include <ipc_ioctl.h>
+#include <sysmgr.h>
+
+
+/* =============================================================================
+ * Macros and types
+ * =============================================================================
+ */
+/* ----------------------------------------------------------------------------
+ * IOCTL command IDs for sysmemmgr
+ * ----------------------------------------------------------------------------
+ */
+/* IOC Magic Number for sysmemmgr */
+#define SYSMEMMGR_IOC_MAGIC IPC_IOC_MAGIC
+
+/* IOCTL command numbers for sysmemmgr */
+enum sysmemmgr_drv_cmd {
+ SYSMEMMGR_GETCONFIG = SYSMEMMGR_BASE_CMD,
+ SYSMEMMGR_SETUP,
+ SYSMEMMGR_DESTROY,
+ SYSMEMMGR_ALLOC,
+ SYSMEMMGR_FREE,
+ SYSMEMMGR_TRANSLATE
+};
+
+/* Command for sysmemmgr_getConfig */
+#define CMD_SYSMEMMGR_GETCONFIG \
+ _IOWR(SYSMEMMGR_IOC_MAGIC, SYSMEMMGR_GETCONFIG, \
+ struct sysmemmgr_cmd_args)
+
+/* Command for sysmemmgr_setup */
+#define CMD_SYSMEMMGR_SETUP \
+ _IOWR(SYSMEMMGR_IOC_MAGIC, SYSMEMMGR_SETUP, \
+ struct sysmemmgr_cmd_args)
+
+/* Command for sysmemmgr_destroy */
+#define CMD_SYSMEMMGR_DESTROY \
+ _IOWR(SYSMEMMGR_IOC_MAGIC, SYSMEMMGR_DESTROY, \
+ struct sysmemmgr_cmd_args)
+
+/* Command for sysmemmgr_alloc */
+#define CMD_SYSMEMMGR_ALLOC \
+ _IOWR(SYSMEMMGR_IOC_MAGIC, SYSMEMMGR_ALLOC, \
+ struct sysmemmgr_cmd_args)
+
+/* Command for sysmemmgr_free */
+#define CMD_SYSMEMMGR_FREE \
+ _IOWR(SYSMEMMGR_IOC_MAGIC, SYSMEMMGR_FREE, \
+ struct sysmemmgr_cmd_args)
+
+/* Command for sysmemmgr_translate */
+#define CMD_SYSMEMMGR_TRANSLATE \
+ _IOWR(SYSMEMMGR_IOC_MAGIC, SYSMEMMGR_TRANSLATE, \
+ struct sysmemmgr_cmd_args)
+
+
+/* ----------------------------------------------------------------------------
+ * Command arguments for sysmemmgr
+ * ----------------------------------------------------------------------------
+ */
+/* Command arguments for sysmemmgr */
+struct sysmemmgr_cmd_args {
+ union {
+ struct {
+ struct sysmemmgr_config *config;
+ } get_config;
+
+ struct {
+ struct sysmemmgr_config *config;
+ } setup;
+
+ struct {
+ u32 size;
+ void *buf;
+ void *phys;
+ void *kbuf;
+ enum sysmemmgr_allocflag flags;
+ } alloc;
+
+ struct {
+ u32 size;
+ void *buf;
+ void *phys;
+ void *kbuf;
+ enum sysmemmgr_allocflag flags;
+ } free;
+
+ struct {
+ void *buf;
+ void *ret_ptr;
+ enum sysmemmgr_xltflag flags;
+ } translate;
+ } args;
+
+ s32 api_status;
+};
+
+/* ----------------------------------------------------------------------------
+ * IOCTL functions for sysmemmgr module
+ * ----------------------------------------------------------------------------
+ */
+/* ioctl interface function for sysmemmgr */
+int sysmemmgr_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+#endif /* _SYSMEMMGR_IOCTL_H_ */
diff --git a/arch/arm/plat-omap/include/syslink/sysmgr.h b/arch/arm/plat-omap/include/syslink/sysmgr.h
new file mode 100644
index 000000000000..19fab220b2c4
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/sysmgr.h
@@ -0,0 +1,182 @@
+/*
+ * sysmgr.h
+ *
+ * Defines for System manager.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _SYSMGR_H_
+#define _SYSMGR_H_
+
+
+/* Module headers */
+#include <multiproc.h>
+#include <gatepeterson.h>
+#include <sharedregion.h>
+#include <listmp.h>
+#include <listmp_sharedmemory.h>
+#include <messageq.h>
+#include <messageq_transportshm.h>
+#include <notify.h>
+#include <notify_ducatidriver.h>
+#include <nameserver.h>
+#include <nameserver_remote.h>
+#include <nameserver_remotenotify.h>
+#include <procmgr.h>
+#include <heap.h>
+#include <heapbuf.h>
+#include <sysmemmgr.h>
+
+
+/*!
+ * @def SYSMGR_MODULEID
+ * @brief Unique module ID.
+ */
+#define SYSMGR_MODULEID (0xF086)
+
+
+/* =============================================================================
+ * Module Success and Failure codes
+ * =============================================================================
+ */
+/*!
+ * @def SYSMGR_STATUSCODEBASE
+ * @brief Error code base for System manager.
+ */
+#define SYSMGR_STATUSCODEBASE (SYSMGR_MODULEID << 12u)
+
+/*!
+ * @def SYSMGR_MAKE_FAILURE
+ * @brief Macro to make error code.
+ */
+#define SYSMGR_MAKE_FAILURE(x) ((s32)(0x80000000 + \
+ (SYSMGR_STATUSCODEBASE + \
+ (x))))
+
+/*!
+ * @def SYSMGR_MAKE_SUCCESS
+ * @brief Macro to make success code.
+ */
+#define SYSMGR_MAKE_SUCCESS(x) (SYSMGR_STATUSCODEBASE + (x))
+
+/*!
+ * @def SYSMGR_E_INVALIDARG
+ * @brief Argument passed to a function is invalid.
+ */
+#define SYSMGR_E_INVALIDARG SYSMGR_MAKE_FAILURE(1)
+
+/*!
+ * @def SYSMGR_E_MEMORY
+ * @brief Memory allocation failed.
+ */
+#define SYSMGR_E_MEMORY SYSMGR_MAKE_FAILURE(2)
+
+/*!
+ * @def SYSMGR_E_FAIL
+ * @brief General failure.
+ */
+#define SYSMGR_E_FAIL SYSMGR_MAKE_FAILURE(3)
+
+/*!
+ * @def SYSMGR_E_INVALIDSTATE
+ * @brief Module is in invalid state.
+ */
+#define SYSMGR_E_INVALIDSTATE SYSMGR_MAKE_FAILURE(4)
+
+/*!
+ * @def SYSMGR_E_OSFAILURE
+ * @brief Failure in OS call.
+ */
+#define SYSMGR_E_OSFAILURE SYSMGR_MAKE_FAILURE(5)
+
+/*!
+ * @def SYSMGR_S_ALREADYSETUP
+ * @brief Module is already initialized.
+ */
+#define SYSMGR_S_ALREADYSETUP SYSMGR_MAKE_SUCCESS(1)
+
+/*!
+ * @def SYSMGR_CMD_SCALABILITY
+ * @brief Command ID for scalability info.
+ */
+#define SYSMGR_CMD_SCALABILITY (0x00000000)
+
+/*!
+ * @def SYSMGR_CMD_SHAREDREGION_ENTRY_START, SYSMGR_CMD_SHAREDREGION_ENTRY_END
+ * @brief Range of command IDs for entries used by SharedRegion.
+ */
+#define SYSMGR_CMD_SHAREDREGION_ENTRY_START (0x00000001)
+#define SYSMGR_CMD_SHAREDREGION_ENTRY_END (0x00001000)
+
+
+/* =============================================================================
+ * Structures & Enums
+ * =============================================================================
+ */
+/*!
+ * @brief Structure defining config parameters for overall System.
+ */
+struct sysmgr_config {
+ struct sysmemmgr_config sysmemmgr_cfg;
+ /*!< System memory manager config parameter */
+
+ struct multiproc_config multiproc_cfg;
+ /*!< Multiproc config parameter */
+
+ struct gatepeterson_config gatepeterson_cfg;
+ /*!< Gatepeterson config parameter */
+
+ struct sharedregion_config sharedregion_cfg;
+ /*!< SharedRegion config parameter */
+
+ struct messageq_config messageq_cfg;
+ /*!< MessageQ config parameter */
+
+ struct notify_config notify_cfg;
+ /*!< Notify config parameter */
+
+ struct proc_mgr_config proc_mgr_cfg;
+ /*!< Processor manager config parameter */
+
+ struct heapbuf_config heapbuf_cfg;
+ /*!< Heap Buf config parameter */
+
+ struct listmp_config listmp_sharedmemory_cfg;
+ /*!< ListMPSharedMemory config parameter */
+
+ struct messageq_transportshm_config messageq_transportshm_cfg;
+ /*!< MessageQTransportShm config parameter */
+
+ struct notify_ducatidrv_config notify_ducatidrv_cfg;
+ /*!< NotifyDriverShm config parameter */
+
+ struct nameserver_remotenotify_config nameserver_remotenotify_cfg;
+ /*!< NameServerRemoteNotify config parameter */
+};
+
+
+/* =============================================================================
+ * APIs
+ * =============================================================================
+ */
+/* Function to initialize the parameter structure */
+void sysmgr_get_config(struct sysmgr_config *config);
+
+/* Function to initialize sysmgr module */
+s32 sysmgr_setup(const struct sysmgr_config *config);
+
+/* Function to finalize the sysmgr module */
+s32 sysmgr_destroy(void);
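+
+/*
+ * Typical call sequence (illustrative sketch only):
+ *
+ *     struct sysmgr_config config;
+ *
+ *     sysmgr_get_config(&config);
+ *     sysmgr_setup(&config);
+ *     ...
+ *     sysmgr_destroy();
+ */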
+
+
+#endif /* ifndef SYSMGR_H_0xF086 */
diff --git a/arch/arm/plat-omap/include/syslink/sysmgr_ioctl.h b/arch/arm/plat-omap/include/syslink/sysmgr_ioctl.h
new file mode 100644
index 000000000000..03db7b9511a2
--- /dev/null
+++ b/arch/arm/plat-omap/include/syslink/sysmgr_ioctl.h
@@ -0,0 +1,100 @@
+/*
+ * sysmgr_ioctl.h
+ *
+ * Definitions of sysmgr driver types and structures.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _SYSMGR_IOCTL_H_
+#define _SYSMGR_IOCTL_H_
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Syslink headers */
+#include <ipc_ioctl.h>
+#include <sysmgr.h>
+
+
+/* =============================================================================
+ * Macros and types
+ * =============================================================================
+ */
+/* ----------------------------------------------------------------------------
+ * IOCTL command IDs for sysmgr
+ * ----------------------------------------------------------------------------
+ */
+/* IOC Magic Number for sysmgr */
+#define SYSMGR_IOC_MAGIC IPC_IOC_MAGIC
+
+/* IOCTL command numbers for sysmgr */
+enum sysmgr_drv_cmd {
+ SYSMGR_SETUP = SYSMGR_BASE_CMD,
+ SYSMGR_DESTROY,
+ SYSMGR_LOADCALLBACK,
+ SYSMGR_STARTCALLBACK,
+ SYSMGR_STOPCALLBACK
+};
+
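+/*
+ * The command numbers above start at SYSMGR_BASE_CMD; the CMD_SYSMGR_*
+ * macros below wrap each one into a complete ioctl request number.
+ */
+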
+/* Command for sysmgr_setup */
+#define CMD_SYSMGR_SETUP \
+ _IOWR(SYSMGR_IOC_MAGIC, SYSMGR_SETUP, \
+ struct sysmgr_cmd_args)
+
+/* Command for sysmgr_destroy */
+#define CMD_SYSMGR_DESTROY \
+ _IOWR(SYSMGR_IOC_MAGIC, SYSMGR_DESTROY, \
+ struct sysmgr_cmd_args)
+
+/* Command for load callback */
+#define CMD_SYSMGR_LOADCALLBACK \
+ _IOWR(SYSMGR_IOC_MAGIC, SYSMGR_LOADCALLBACK, \
+ struct sysmgr_cmd_args)
+
+/* Command for start callback */
+#define CMD_SYSMGR_STARTCALLBACK \
+ _IOWR(SYSMGR_IOC_MAGIC, SYSMGR_STARTCALLBACK, \
+ struct sysmgr_cmd_args)
+
+/* Command for stop callback */
+#define CMD_SYSMGR_STOPCALLBACK \
+ _IOWR(SYSMGR_IOC_MAGIC, SYSMGR_STOPCALLBACK, \
+ struct sysmgr_cmd_args)
+
+
+/* ----------------------------------------------------------------------------
+ * Command arguments for sysmgr
+ * ----------------------------------------------------------------------------
+ */
+/* Command arguments for sysmgr */
+struct sysmgr_cmd_args {
+ union {
+ struct {
+ struct sysmgr_config *config;
+ } setup;
+
+ int proc_id;
+ } args;
+
+ s32 api_status;
+};
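+
+/*
+ * Illustrative user-space sketch only (fd is assumed to be an open file
+ * descriptor for the syslink IPC device; the node name is not defined by
+ * this header):
+ *
+ *     struct sysmgr_cmd_args args;
+ *
+ *     args.args.setup.config = &config;
+ *     ioctl(fd, CMD_SYSMGR_SETUP, &args);
+ *     if (args.api_status < 0)
+ *             the kernel-side sysmgr_setup() call failed;
+ */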
+
+/* ----------------------------------------------------------------------------
+ * IOCTL functions for sysmgr module
+ * ----------------------------------------------------------------------------
+ */
+/* ioctl interface function for sysmgr */
+int sysmgr_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args);
+
+#endif /* _SYSMGR_IOCTL_H_ */
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index c0ff1e39d893..761df23150fd 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -171,15 +171,12 @@ static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
l->base = MMU_LOCK_BASE(val);
l->vict = MMU_LOCK_VICT(val);
- BUG_ON(l->base != 0); /* Currently no preservation is used */
}
static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
u32 val;
- BUG_ON(l->base != 0); /* Currently no preservation is used */
-
val = (l->base << MMU_LOCK_BASE_SHIFT);
val |= (l->vict << MMU_LOCK_VICT_SHIFT);
@@ -241,7 +238,7 @@ int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
break;
}
- if (i == obj->nr_tlb_entries) {
+ if (i == obj->nr_tlb_entries || (l.base == obj->nr_tlb_entries)) {
dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
err = -EBUSY;
goto out;
@@ -252,13 +249,18 @@ int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
clk_disable(obj->clk);
return PTR_ERR(cr);
}
-
iotlb_load_cr(obj, cr);
kfree(cr);
+ /* Increment base number if preservation is set */
+ if (e->prsvd)
+ l.base++;
/* increment victim for next tlb load */
- if (++l.vict == obj->nr_tlb_entries)
- l.vict = 0;
+ if (++l.vict == obj->nr_tlb_entries) {
+ l.vict = l.base;
+ goto out;
+ }
+
iotlb_lock_set(obj, &l);
out:
clk_disable(obj->clk);
@@ -862,10 +864,12 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
if (!obj)
return -ENOMEM;
- obj->clk = clk_get(&pdev->dev, pdata->clk_name);
- if (IS_ERR(obj->clk))
- goto err_clk;
-
+ /* FIX ME: OMAP4 PM framework not ready */
+ if (!cpu_is_omap44xx()) {
+ obj->clk = clk_get(&pdev->dev, pdata->clk_name);
+ if (IS_ERR(obj->clk))
+ goto err_clk;
+ }
obj->nr_tlb_entries = pdata->nr_tlb_entries;
obj->name = pdata->name;
obj->dev = &pdev->dev;
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 8e90633e4cb9..bf239b682053 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -28,6 +28,7 @@
#include <plat/mailbox.h>
+static struct workqueue_struct *mboxd;
static struct omap_mbox *mboxes;
static DEFINE_RWLOCK(mboxes_lock);
@@ -70,11 +71,10 @@ static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
int ret = 0, i = 1000;
while (mbox_fifo_full(mbox)) {
- if (mbox->ops->type == OMAP_MBOX_TYPE2)
- return -1;
if (--i == 0)
return -1;
udelay(1);
+ printk(KERN_ERR "Mailbox FIFO full %d\n", i);
}
mbox_fifo_write(mbox, msg);
return ret;
@@ -83,7 +83,13 @@ static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
{
+ /* Directly calling __mbox_msg_send since Tesla is already running
+ in a tasklet */
+ return __mbox_msg_send(mbox, msg);
+ /* FIXME Work queue is not used to send mailbox messages.
+ Directly calling __mbox_msg_send().*/
+#if 0
struct request *rq;
struct request_queue *q = mbox->txq->queue;
@@ -95,6 +101,7 @@ int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
tasklet_schedule(&mbox->txq->tasklet);
return 0;
+#endif
}
EXPORT_SYMBOL(omap_mbox_msg_send);
@@ -135,6 +142,7 @@ static void mbox_rx_work(struct work_struct *work)
mbox_msg_t msg;
unsigned long flags;
+
while (1) {
spin_lock_irqsave(q->queue_lock, flags);
rq = blk_fetch_request(q);
@@ -146,6 +154,7 @@ static void mbox_rx_work(struct work_struct *work)
blk_end_request_all(rq, 0);
mbox->rxq->callback((void *)msg);
}
+
}
/*
@@ -179,7 +188,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
msg = mbox_fifo_read(mbox);
-
+ rq->special = (void *)msg;
blk_insert_request(q, rq, 0, (void *)msg);
if (mbox->ops->type == OMAP_MBOX_TYPE1)
break;
@@ -188,7 +197,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
/* no more messages in the fifo. clear IRQ source. */
ack_mbox_irq(mbox, IRQ_RX);
nomem:
- schedule_work(&mbox->rxq->work);
+ queue_work(mboxd, &mbox->rxq->work);
}
static irqreturn_t mbox_interrupt(int irq, void *p)
@@ -401,12 +410,17 @@ EXPORT_SYMBOL(omap_mbox_unregister);
static int __init omap_mbox_init(void)
{
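+ /* Use a dedicated workqueue for mailbox RX bottom-half work instead
+ * of the shared kernel workqueue; __mbox_rx_interrupt() above now
+ * queues its rx work onto mboxd.
+ */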
+ mboxd = create_workqueue("mboxd");
+ if (!mboxd)
+ return -ENOMEM;
+
return 0;
}
module_init(omap_mbox_init);
static void __exit omap_mbox_exit(void)
{
+ destroy_workqueue(mboxd);
}
module_exit(omap_mbox_exit);
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 8eb8fb8654e6..5671c3f9444a 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -37,6 +37,7 @@ void omap_mcbsp_write(void __iomem *io_base, u16 reg, u32 val)
else
__raw_writel(val, io_base + reg);
}
+EXPORT_SYMBOL(omap_mcbsp_write);
int omap_mcbsp_read(void __iomem *io_base, u16 reg)
{
@@ -45,6 +46,7 @@ int omap_mcbsp_read(void __iomem *io_base, u16 reg)
else
return __raw_readl(io_base + reg);
}
+EXPORT_SYMBOL(omap_mcbsp_read);
#define OMAP_MCBSP_READ(base, reg) \
omap_mcbsp_read(base, OMAP_MCBSP_REG_##reg)
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f60a5400a25b..bf4734bf50fa 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -525,7 +525,7 @@ static int __init vfp_init(void)
*/
elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
- if (VFP_arch >= 3) {
+ if (VFP_arch >= 2) {
elf_hwcap |= HWCAP_VFPv3;
/*
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8a07363417ed..5f74e0e182e9 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -113,4 +113,11 @@ source "drivers/xen/Kconfig"
source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
+
+source "drivers/media/video/dmm/Kconfig"
+
+source "drivers/media/video/tiler/Kconfig"
+
+source "drivers/dsp/syslink/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 6ee53c7a57a1..1b398f902259 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -111,3 +111,10 @@ obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
obj-y += ieee802154/
+obj-$(CONFIG_DMM_OMAP) += media/
+obj-$(CONFIG_TILER_OMAP) += media/
+obj-$(CONFIG_MPU_SYSLINK_IPC) += dsp/syslink/multicore_ipc/
+obj-$(CONFIG_MPU_BRIDGE_NOTIFY) += dsp/syslink/omap_notify/
+obj-$(CONFIG_NOTIFY_DUCATI) += dsp/syslink/notify_ducatidriver/
+obj-$(CONFIG_SYSLINK_PROC) += dsp/syslink/procmgr/
+obj-$(CONFIG_SYSLINK_PROC4430) += dsp/syslink/procmgr/proc4430/
diff --git a/drivers/dsp/syslink/Kconfig b/drivers/dsp/syslink/Kconfig
new file mode 100755
index 000000000000..63752e2f2a5d
--- /dev/null
+++ b/drivers/dsp/syslink/Kconfig
@@ -0,0 +1,62 @@
+menuconfig Sys_Link
+ bool "Sys_Link"
+ default y
+if Sys_Link
+
+config SYSLINK_PROC
+ tristate "Syslink ProcMgr"
+ default y
+ help
+ Syslink Proc manager
+
+config SYSLINK_PROC4430
+ tristate "Proc 4430"
+ depends on SYSLINK_PROC
+ default y
+ help
+ Ducati Proc implementation
+
+config MPU_BRIDGE_NOTIFY
+ tristate "OMAP Notify Module"
+ default y
+ select OMAP_MBOX_FWK
+ help
+ Notify Module
+
+
+config NOTIFY_DUCATI
+ tristate "OMAP Notify Ducati Module"
+ depends on MPU_BRIDGE_NOTIFY && SYSLINK_PROC4430
+ default y
+ help
+ Notify Ducati Module
+
+config MPU_SYSLINK_IPC
+ tristate "Syslink IPC Module"
+ depends on MPU_BRIDGE_NOTIFY && NOTIFY_DUCATI
+ default y
+ help
+ Syslink IPC Module
+
+config SYSLINK_USE_SYSMGR
+ bool "Enable SYS MGR setup"
+ depends on MPU_SYSLINK_IPC && SYSLINK_PROC
+ default y
+ help
+ This is an experimental option to enable SYS manager setup
+
+config OMAP_IOMMU
+ tristate "IOMMU"
+ default y
+ help
+ Select the IOMMU module for managing the Ducati MMU
+
+config OMAP_IOMMU_DEBUG_MODULE
+ bool "IOMMU debugging"
+ depends on OMAP_IOMMU
+ default y
+endif
diff --git a/drivers/dsp/syslink/multicore_ipc/Kbuild b/drivers/dsp/syslink/multicore_ipc/Kbuild
new file mode 100644
index 000000000000..2377e20c76b1
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/Kbuild
@@ -0,0 +1,25 @@
+libsyslink_ipc = multiproc.o multiproc_ioctl.o nameserver.o \
+nameserver_ioctl.o heap.o heapbuf.o heapbuf_ioctl.o \
+gatepeterson.o gatepeterson_ioctl.o sharedregion.o sharedregion_ioctl.o \
+nameserver_remote.o nameserver_remotenotify.o listmp_sharedmemory.o \
+listmp.o listmp_sharedmemory_ioctl.o messageq.o messageq_ioctl.o \
+messageq_transportshm.o messageq_transportshm_ioctl.o \
+nameserver_remotenotify_ioctl.o platform_mem.o sysmgr.o sysmgr_ioctl.o \
+sysmemmgr.o sysmemmgr_ioctl.o platformcfg.o platform.o ipc_ioctl.o ipc_drv.o
+
+obj-$(CONFIG_MPU_SYSLINK_IPC) += syslink_ipc.o
+syslink_ipc-objs = $(libservices) $(libsyslink_ipc)
+
+ccflags-y += -Wno-strict-prototypes
+
+#Machine dependent
+ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
+ -DTICFG_PROC_VER -DTICFG_EVM_TYPE -DCHNL_SMCLASS \
+ -DCHNL_MESSAGES -DUSE_LEVEL_1_MACROS \
+ -DCONFIG_DISABLE_BRIDGE_PM -DDSP_TRACEBUF_DISABLED
+
+#Header files
+ccflags-y += -Iarch/arm/plat-omap/include
+ccflags-y += -Iarch/arm/plat-omap/include/syslink
+ccflags-y += -Iarch/arm/plat-omap/include/dspbridge
+
diff --git a/drivers/dsp/syslink/multicore_ipc/_listmp.h b/drivers/dsp/syslink/multicore_ipc/_listmp.h
new file mode 100644
index 000000000000..13223dc2610b
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/_listmp.h
@@ -0,0 +1,48 @@
+/*
+ * _listmp.h
+ *
+ * Internal definitions for the shared memory doubly linked
+ * list.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef __LISTMP_H_
+#define __LISTMP_H_
+
+/* Standard headers */
+#include <linux/types.h>
+
+/*!
+ * @brief Structure defining attribute parameters for the
+ * ListMP module.
+ */
+struct listmp_attrs {
+ u32 version;
+ /*!< Version of module */
+ u32 status;
+ /*!< Status of module */
+ u32 shared_addr_size;
+ /*!< Shared address size of module */
+};
+
+/*!
+ * @brief Structure defining processor related information for the
+ * ListMP module.
+ */
+struct listmp_proc_attrs {
+ bool creator; /*!< Creator or opener */
+ u16 proc_id; /*!< Processor Identifier */
+ u32 open_count; /*!< How many times it is opened on a processor */
+};
+
+#endif /* __LISTMP_H_ */
diff --git a/drivers/dsp/syslink/multicore_ipc/gate_remote.c b/drivers/dsp/syslink/multicore_ipc/gate_remote.c
new file mode 100644
index 000000000000..b5cf6871c8b9
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/gate_remote.c
@@ -0,0 +1,40 @@
+/*
+ * gate_remote.c
+ *
+ * This includes the functions to handle remote gates
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/types.h>
+
+/*
+ * ======== gate_remote_enter ========
+ * Purpose:
+ * This function is used to enter a remote gate
+ */
+int gate_remote_enter(void *ghandle)
+{
+ return 0;
+}
+
+/*
+ * ======== gate_remote_leave ========
+ * Purpose:
+ * This function is used to leave a remote gate
+ */
+int gate_remote_leave(void *ghandle, u32 key)
+{
+ key = 0;
+ return 0;
+}
+
diff --git a/drivers/dsp/syslink/multicore_ipc/gatepeterson.c b/drivers/dsp/syslink/multicore_ipc/gatepeterson.c
new file mode 100755
index 000000000000..d885f12b6a84
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/gatepeterson.c
@@ -0,0 +1,964 @@
+/*
+ * gatepeterson.c
+ *
+ * The Gate Peterson Algorithm for mutual exclusion of shared memory.
+ * Current implementation works for 2 processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <syslink/atomic_linux.h>
+#include <multiproc.h>
+#include <nameserver.h>
+#include <sharedregion.h>
+#include <gatepeterson.h>
+
+
+/* IPC stubs */
+
+/*
+ * Name of the reserved NameServer used for gatepeterson
+ */
+#define GATEPETERSON_NAMESERVER "GatePeterson"
+#define GATEPETERSON_BUSY 1
+#define GATEPETERSON_FREE 0
+#define GATEPETERSON_VERSION 1
+#define GATEPETERSON_CREATED 0x08201997 /* Stamp to indicate GP
+ was created here */
+#define MAX_GATEPETERSON_NAME_LEN 32
+
+/* Cache line size */
+#define GATEPETERSON_CACHESIZE 128
+
+/* Macro to make a correct module magic number with ref_count */
+#define GATEPETERSON_MAKE_MAGICSTAMP(x) ((GATEPETERSON_MODULEID << 12) | (x))
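+
+/*
+ * For example, GATEPETERSON_MAKE_MAGICSTAMP(1) carries
+ * GATEPETERSON_MODULEID in the upper bits and the count (1) in the lower
+ * 12 bits, so a valid ref_count can be told apart from an uninitialized
+ * variable.
+ */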
+
+/*
+ * structure for gatepeterson module state
+ */
+struct gatepeterson_moduleobject {
+ atomic_t ref_count; /* Reference count */
+ void *nshandle;
+ struct list_head obj_list;
+ struct mutex *mod_lock; /* Lock for obj list */
+ struct gatepeterson_config cfg;
+ struct gatepeterson_config default_cfg;
+ struct gatepeterson_params def_inst_params; /* default instance
+ parameters */
+};
+
+/*
+ * Structure defining attribute parameters for the Gate Peterson module
+ */
+struct gatepeterson_attrs {
+ VOLATILE u32 version;
+ VOLATILE u32 status;
+ VOLATILE u16 creator_proc_id;
+ VOLATILE u16 opener_proc_id;
+};
+
+/*
+ * Structure defining internal object for the Gate Peterson
+ */
+struct gatepeterson_obj {
+ struct list_head elem;
+ VOLATILE struct gatepeterson_attrs *attrs; /* Instance attr */
+ VOLATILE u32 *flag[2]; /* Flags for the two processors */
+ VOLATILE u32 *turn; /* Indicates whose turn it is now */
+ u8 self_id; /* Self identifier */
+ u8 other_id; /* Other's identifier */
+ u32 nested; /* Counter to track nesting */
+ void *local_gate; /* Local lock handle */
+ void *ns_key; /* NameServer key received in create */
+ enum gatepeterson_protect local_protection; /* Type of local protection
+ to be used */
+ struct gatepeterson_params params;
+ void *top; /* Pointer to the top Object */
+ u32 ref_count; /* Local reference count */
+};
+
+/*
+ * Structure defining object for the Gate Peterson
+ */
+struct gatepeterson_object {
+ void *(*lock_get_knl_handle)(void **handle); /* Pointer to
+ Kernel object will be returned */
+ u32 (*enter)(void *handle); /* Function to enter GP */
+ void (*leave)(void *handle, u32 key); /* Function to leave GP */
+ struct gatepeterson_obj *obj; /* Pointer to GP internal object */
+};
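+
+/*
+ * gatepeterson_object is the public handle: _gatepeterson_create() fills
+ * in the enter/leave/lock_get_knl_handle function pointers and the
+ * internal gatepeterson_obj, and returns this wrapper to callers.
+ */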
+
+/*
+ * Variable for holding state of the gatepeterson module
+ */
+struct gatepeterson_moduleobject gatepeterson_state = {
+ .obj_list = LIST_HEAD_INIT(gatepeterson_state.obj_list),
+ .default_cfg.max_name_len = MAX_GATEPETERSON_NAME_LEN,
+ .default_cfg.default_protection = GATEPETERSON_PROTECT_PROCESS,
+ .default_cfg.use_nameserver = true,
+ .def_inst_params.shared_addr = 0x0,
+ .def_inst_params.shared_addr_size = 0x0,
+ .def_inst_params.name = NULL,
+ .def_inst_params.local_protection = GATEPETERSON_PROTECT_DEFAULT
+
+};
+
+static void *_gatepeterson_create(const struct gatepeterson_params *params,
+ bool create_flag);
+
+/*
+ * ======== gatepeterson_get_config ========
+ * Purpose:
+ * This will get the default configuration parameters for gatepeterson
+ * module
+ */
+void gatepeterson_get_config(struct gatepeterson_config *config)
+{
+ if (WARN_ON(config == NULL))
+ goto exit;
+
+ if (atomic_cmpmask_and_lt(&(gatepeterson_state.ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true)
+ memcpy(config, &gatepeterson_state.default_cfg,
+ sizeof(struct gatepeterson_config));
+ else
+ memcpy(config, &gatepeterson_state.cfg,
+ sizeof(struct gatepeterson_config));
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(gatepeterson_get_config);
+
+/*
+ * ======== gatepeterson_setup ========
+ * Purpose:
+ * This will setup the gatepeterson module
+ */
+int gatepeterson_setup(const struct gatepeterson_config *config)
+{
+ struct nameserver_params params;
+ struct gatepeterson_config tmp_cfg;
+ void *nshandle = NULL;
+ s32 retval = 0;
+ s32 ret;
+
+ /* This sets the ref_count variable if it is not initialized; the upper
+ * 16 bits are written with the module ID to ensure correctness of the
+ * ref_count variable
+ */
+ atomic_cmpmask_and_set(&gatepeterson_state.ref_count,
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&gatepeterson_state.ref_count)
+ != GATEPETERSON_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (config == NULL) {
+ gatepeterson_get_config(&tmp_cfg);
+ config = &tmp_cfg;
+ }
+
+ if (WARN_ON(config->max_name_len == 0)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (likely((config->use_nameserver == true))) {
+ retval = nameserver_params_init(&params);
+ params.max_value_len = sizeof(u32);
+ params.max_name_len = config->max_name_len;
+ /* Create the nameserver for modules */
+ nshandle = nameserver_create(GATEPETERSON_NAMESERVER, &params);
+ if (nshandle == NULL)
+ goto exit;
+
+ gatepeterson_state.nshandle = nshandle;
+ }
+
+ memcpy(&gatepeterson_state.cfg, config,
+ sizeof(struct gatepeterson_config));
+ gatepeterson_state.mod_lock = kmalloc(sizeof(struct mutex),
+ GFP_KERNEL);
+ if (gatepeterson_state.mod_lock == NULL) {
+ retval = -ENOMEM;
+ goto lock_create_fail;
+ }
+
+ mutex_init(gatepeterson_state.mod_lock);
+ return 0;
+
+lock_create_fail:
+ if ((likely(config->use_nameserver == true)))
+ ret = nameserver_delete(&gatepeterson_state.nshandle);
+
+exit:
+ atomic_set(&gatepeterson_state.ref_count,
+ GATEPETERSON_MAKE_MAGICSTAMP(0));
+
+ printk(KERN_ERR "gatepeterson_setup failed status: %x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(gatepeterson_setup);
+
+/*
+ * ======== gatepeterson_destroy ========
+ * Purpose:
+ * This will destroy the gatepeterson module
+ */
+int gatepeterson_destroy(void)
+
+{
+ struct gatepeterson_obj *obj = NULL;
+ struct mutex *lock = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(gatepeterson_state.ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&gatepeterson_state.ref_count)
+ == GATEPETERSON_MAKE_MAGICSTAMP(0))) {
+ retval = 1;
+ goto exit;
+ }
+
+ /* Temporarily increment ref_count here. */
+ atomic_set(&gatepeterson_state.ref_count,
+ GATEPETERSON_MAKE_MAGICSTAMP(1));
+ /* Check if any gatepeterson instances have not been
+ * deleted/closed so far; if there are any, delete or close them
+ */
+ list_for_each_entry(obj, &gatepeterson_state.obj_list, elem) {
+ if (obj->attrs->creator_proc_id ==
+ multiproc_get_id(NULL))
+ gatepeterson_delete(&obj->top);
+ else
+ gatepeterson_close(&obj->top);
+
+ if (list_empty(&gatepeterson_state.obj_list))
+ break;
+ }
+
+ /* Again reset ref_count. */
+ atomic_set(&gatepeterson_state.ref_count,
+ GATEPETERSON_MAKE_MAGICSTAMP(0));
+
+ retval = mutex_lock_interruptible(gatepeterson_state.mod_lock);
+ if (retval != 0)
+ goto exit;
+
+ if (likely(gatepeterson_state.cfg.use_nameserver == true)) {
+ retval = nameserver_delete(&gatepeterson_state.nshandle);
+ if (unlikely(retval != 0))
+ goto exit;
+ }
+
+ lock = gatepeterson_state.mod_lock;
+ gatepeterson_state.mod_lock = NULL;
+ memset(&gatepeterson_state.cfg, 0, sizeof(struct gatepeterson_config));
+ mutex_unlock(lock);
+ kfree(lock);
+ /* Decrease the ref_count */
+ atomic_set(&gatepeterson_state.ref_count,
+ GATEPETERSON_MAKE_MAGICSTAMP(0));
+ return 0;
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "gatepeterson_destroy failed status:%x\n",
+ retval);
+ }
+ return retval;
+
+}
+EXPORT_SYMBOL(gatepeterson_destroy);
+
+/*
+ * ======== gatepeterson_params_init ========
+ * Purpose:
+ * This will initialize this config-params structure with
+ * supplier-specified defaults before instance creation
+ */
+void gatepeterson_params_init(void *handle,
+ struct gatepeterson_params *params)
+{
+ if (WARN_ON(atomic_cmpmask_and_lt(&(gatepeterson_state.ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(params == NULL))
+ goto exit;
+
+ if (handle == NULL)
+ memcpy(params, &(gatepeterson_state.def_inst_params),
+ sizeof(struct gatepeterson_params));
+ else {
+ struct gatepeterson_obj *obj =
+ (struct gatepeterson_obj *)handle;
+ /* Return updated gatepeterson instance specific parameters. */
+ memcpy(params, &(obj->params),
+ sizeof(struct gatepeterson_params));
+ }
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(gatepeterson_params_init);
+
+/*
+ * ======== gatepeterson_create ========
+ * Purpose:
+ * This will create a new instance of the gatepeterson module
+ */
+void *gatepeterson_create(const struct gatepeterson_params *params)
+{
+ void *handle = NULL;
+ s32 retval = 0;
+ u32 shaddrsize;
+
+ BUG_ON(params == NULL);
+ if (atomic_cmpmask_and_lt(&(gatepeterson_state.ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ shaddrsize = gatepeterson_shared_memreq(params);
+ if (WARN_ON(params->shared_addr == NULL ||
+ params->shared_addr_size < shaddrsize)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (params->local_protection >= GATEPETERSON_PROTECT_END_VALUE) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ handle = _gatepeterson_create(params, true);
+ return handle;
+
+exit:
+ return NULL;
+}
+EXPORT_SYMBOL(gatepeterson_create);
+
+/*
+ * ======== gatepeterson_delete ========
+ * Purpose:
+ * This will delete an instance of the gatepeterson module
+ */
+int gatepeterson_delete(void **gphandle)
+
+{
+ struct gatepeterson_object *handle = NULL;
+ struct gatepeterson_obj *obj = NULL;
+ struct gatepeterson_params *params = NULL;
+ s32 retval;
+
+ BUG_ON(gphandle == NULL);
+ BUG_ON(*gphandle == NULL);
+ if (atomic_cmpmask_and_lt(&(gatepeterson_state.ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ handle = (struct gatepeterson_object *)(*gphandle);
+ obj = (struct gatepeterson_obj *)handle->obj;
+ if (unlikely(obj == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (unlikely(obj->attrs == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ /* Check if we have created the GP or not */
+ if (unlikely(obj->attrs->creator_proc_id != multiproc_get_id(NULL))) {
+ retval = -EACCES;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(obj->local_gate);
+ if (retval)
+ goto exit;
+
+ if (obj->ref_count != 0) {
+ retval = -EBUSY;
+ goto error_handle;
+ }
+
+ obj->attrs->status = !GATEPETERSON_CREATED;
+ retval = mutex_lock_interruptible(gatepeterson_state.mod_lock);
+ if (retval)
+ goto exit;
+
+ list_del(&obj->elem); /* Remove the GP instance from the GP list */
+ mutex_unlock(gatepeterson_state.mod_lock);
+ params = &obj->params;
+ /* Remove from the name server */
+ if (likely(gatepeterson_state.cfg.use_nameserver) &&
+ params->name != NULL) {
+ retval = nameserver_remove_entry(gatepeterson_state.nshandle,
+ obj->ns_key);
+ if (unlikely(retval != 0))
+ goto error_handle;
+ kfree(params->name);
+ obj->ns_key = NULL;
+ }
+
+ mutex_unlock(obj->local_gate);
+ /* If the lock handle was created internally */
+ switch (obj->params.local_protection) {
+ case GATEPETERSON_PROTECT_NONE: /* Fall through */
+ obj->local_gate = NULL; /* TBD: Fixme */
+ break;
+ case GATEPETERSON_PROTECT_INTERRUPT: /* Fall through */
+ /* FIXME: Add a spinlock protection */
+ case GATEPETERSON_PROTECT_TASKLET: /* Fall through */
+ case GATEPETERSON_PROTECT_THREAD: /* Fall through */
+ case GATEPETERSON_PROTECT_PROCESS:
+ kfree(obj->local_gate);
+ break;
+ default:
+ /* An invalid protection level was supplied, FIXME */
+ break;
+ }
+
+ kfree(obj);
+ kfree(handle);
+ *gphandle = NULL;
+ return 0;
+
+error_handle:
+ mutex_unlock(obj->local_gate);
+
+exit:
+ printk(KERN_ERR "gatepeterson_create failed status: %x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(gatepeterson_delete);
+
+/*
+ * ======== gatepeterson_inc_refcount ========
+ * Purpose:
+ * This will increment the reference count while opening
+ * a GP instance if it is already opened from local processor
+ */
+static bool gatepeterson_inc_refcount(const struct gatepeterson_params *params,
+ void **handle)
+{
+ struct gatepeterson_obj *obj = NULL;
+ s32 retval = 0;
+ bool done = false;
+
+ list_for_each_entry(obj, &gatepeterson_state.obj_list, elem) {
+ if (params->shared_addr != NULL) {
+ if (obj->params.shared_addr == params->shared_addr) {
+ retval = mutex_lock_interruptible(
+ gatepeterson_state.mod_lock);
+ if (retval)
+ break;
+
+ obj->ref_count++;
+ *handle = obj->top;
+ mutex_unlock(gatepeterson_state.mod_lock);
+ done = true;
+ break;
+ }
+ } else if (params->name != NULL && obj->params.name != NULL) {
+ if (strcmp(obj->params.name, params->name) == 0) {
+ retval = mutex_lock_interruptible(
+ gatepeterson_state.mod_lock);
+ if (retval)
+ break;
+
+ obj->ref_count++;
+ *handle = obj->top;
+ mutex_unlock(gatepeterson_state.mod_lock);
+ done = true;
+ break;
+ }
+ }
+ }
+
+ return done;
+}
+
+/*
+ * ======== gatepeterson_open ========
+ * Purpose:
+ * This will open a created instance of the gatepeterson
+ * module.
+ */
+int gatepeterson_open(void **gphandle,
+ struct gatepeterson_params *params)
+{
+ void *temp = NULL;
+ s32 retval = 0;
+ u32 sharedaddr;
+
+ BUG_ON(params == NULL);
+ BUG_ON(gphandle == NULL);
+ if (atomic_cmpmask_and_lt(&(gatepeterson_state.ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (gatepeterson_state.cfg.use_nameserver == false &&
+ params->shared_addr == NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (gatepeterson_state.cfg.use_nameserver == true &&
+ params->shared_addr == NULL && params->name == NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (params->shared_addr != NULL && params->shared_addr_size <
+ gatepeterson_shared_memreq(params)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (gatepeterson_inc_refcount(params, &temp)) {
+ retval = -EBUSY;
+ goto exit; /* It's already opened from local processor */
+ }
+
+ if (unlikely(params->shared_addr == NULL)) {
+ if (likely(gatepeterson_state.cfg.use_nameserver == true &&
+ params->name != NULL)) {
+ /* Find in name server */
+ retval = nameserver_get(gatepeterson_state.nshandle,
+ params->name, &sharedaddr,
+ sizeof(u32), NULL);
+ if (retval < 0)
+ goto noentry_fail; /* Entry not found */
+
+ params->shared_addr = sharedregion_get_ptr(
+ (u32 *)sharedaddr);
+ if (params->shared_addr == NULL)
+ goto noentry_fail;
+ }
+ } else
+ sharedaddr = (u32) params->shared_addr;
+
+ if (unlikely(((struct gatepeterson_attrs *)sharedaddr)->status !=
+ GATEPETERSON_CREATED)) {
+ retval = -ENXIO; /* Not created */
+ goto exit;
+ }
+
+ if (unlikely(((struct gatepeterson_attrs *)sharedaddr)->version !=
+ GATEPETERSON_VERSION)) {
+ retval = -ENXIO; /* FIXME Version mismatch,
+ need to change retval */
+ goto exit;
+ }
+
+ *gphandle = _gatepeterson_create(params, false);
+ return 0;
+
+noentry_fail: /* Fall through */
+ retval = -ENOENT;
+exit:
+ printk(KERN_ERR "gatepeterson_open failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(gatepeterson_open);
+
+/*
+ * ======== gatepeterson_close ========
+ * Purpose:
+ * This will close a previously opened/created instance
+ * of gatepeterson module
+ */
+int gatepeterson_close(void **gphandle)
+{
+ struct gatepeterson_object *handle = NULL;
+ struct gatepeterson_obj *obj = NULL;
+ struct gatepeterson_params *params = NULL;
+ s32 retval = 0;
+
+ BUG_ON(gphandle == NULL);
+ if (atomic_cmpmask_and_lt(&(gatepeterson_state.ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(*gphandle == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct gatepeterson_object *)(*gphandle);
+ obj = (struct gatepeterson_obj *) handle->obj;
+ if (unlikely(obj == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(obj->local_gate);
+ if (retval)
+ goto exit;
+
+ if (obj->ref_count > 1) {
+ obj->ref_count--;
+ mutex_unlock(obj->local_gate);
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(gatepeterson_state.mod_lock);
+ if (retval)
+ goto error_handle;
+
+ list_del(&obj->elem);
+ mutex_unlock(gatepeterson_state.mod_lock);
+ params = &obj->params;
+ if (likely(params->name != NULL))
+ kfree(params->name);
+
+ mutex_unlock(obj->local_gate);
+ /* If the lock handle was created internally */
+ switch (obj->params.local_protection) {
+ case GATEPETERSON_PROTECT_NONE: /* Fall through */
+ obj->local_gate = NULL; /* TBD: Fixme */
+ break;
+ case GATEPETERSON_PROTECT_INTERRUPT: /* Fall through */
+ /* FIXME: Add a spinlock protection */
+ case GATEPETERSON_PROTECT_TASKLET: /* Fall through */
+ case GATEPETERSON_PROTECT_THREAD: /* Fall through */
+ case GATEPETERSON_PROTECT_PROCESS:
+ kfree(obj->local_gate);
+ break;
+ default:
+ /* An invalid protection level was supplied */
+ break;
+ }
+
+ kfree(obj);
+ kfree(handle);
+ *gphandle = NULL;
+ return 0;
+
+error_handle:
+ mutex_unlock(obj->local_gate);
+
+exit:
+ printk(KERN_ERR "gatepeterson_close failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(gatepeterson_close);
+
+/*
+ * ======== gatepeterson_enter ========
+ * Purpose:
+ * This will enter the gatepeterson instance
+ */
+u32 gatepeterson_enter(void *gphandle)
+{
+ struct gatepeterson_object *handle = NULL;
+ struct gatepeterson_obj *obj = NULL;
+ s32 retval = 0;
+
+ BUG_ON(gphandle == NULL);
+ if (WARN_ON(atomic_cmpmask_and_lt(&(gatepeterson_state.ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+
+ handle = (struct gatepeterson_object *)gphandle;
+ obj = (struct gatepeterson_obj *) handle->obj;
+ if (obj->local_gate != NULL)
+ retval = mutex_lock_interruptible(obj->local_gate);
+ if (retval)
+ goto exit;
+
+ obj->nested++;
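+ /* Only the outermost (nested == 1) enter performs the Peterson
+ * handshake below; nested enters on the same processor just
+ * increment the counter.
+ */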
+ if (obj->nested == 1) {
+ /* indicate, needs to use the resource. */
+ *((u32 *)obj->flag[obj->self_id]) = GATEPETERSON_BUSY ;
+ /* Give away the turn. */
+ *((u32 *)(obj->turn)) = obj->other_id;
+ /* Wait while other processor is using the resource and has
+ * the turn
+ */
+ while ((*((VOLATILE u32 *) obj->flag[obj->other_id])
+ == GATEPETERSON_BUSY) &&
+ (*((VOLATILE u32 *)obj->turn) == obj->other_id))
+ ; /* Empty body loop */
+ }
+
+ return 0;
+
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(gatepeterson_enter);
+
+/*
+ * ======== gatepeterson_leave ========
+ * Purpose:
+ * This will leave the gatepeterson instance
+ */
+void gatepeterson_leave(void *gphandle, u32 flag)
+{
+ struct gatepeterson_object *handle = NULL;
+ struct gatepeterson_obj *obj = NULL;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(gatepeterson_state.ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ BUG_ON(gphandle == NULL);
+
+ handle = (struct gatepeterson_object *)gphandle;
+ (void) flag;
+ obj = (struct gatepeterson_obj *)handle->obj;
+ obj->nested--;
+ if (obj->nested == 0)
+ *((VOLATILE u32 *)obj->flag[obj->self_id]) = GATEPETERSON_FREE;
+
+ if (obj->local_gate != NULL)
+ mutex_unlock(obj->local_gate);
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(gatepeterson_leave);
+
+/*
+ * ======== gatepeterson_get_knl_handle ========
+ * Purpose:
+ * This will return the gatepeterson kernel object pointer
+ */
+void *gatepeterson_get_knl_handle(void **gphandle)
+{
+ BUG_ON(gphandle == NULL);
+ return gphandle;
+}
+EXPORT_SYMBOL(gatepeterson_get_knl_handle);
+
+/*
+ * ======== gatepeterson_shared_memreq ========
+ * Purpose:
+ * This will give the amount of shared memory required
+ * for creation of each instance
+ */
+u32 gatepeterson_shared_memreq(const struct gatepeterson_params *params)
+{
+ u32 retval = 0;
+
+ retval = (GATEPETERSON_CACHESIZE * 4);
+ return retval;
+}
+EXPORT_SYMBOL(gatepeterson_shared_memreq);
+
+/*
+ * ======== _gatepeterson_create ========
+ * Purpose:
+ * Creates a new instance of the gatepeterson module.
+ * This is an internal function because both
+ * gatepeterson_create and gatepeterson_open
+ * use the same functionality.
+ */
+static void *_gatepeterson_create(const struct gatepeterson_params *params,
+ bool create_flag)
+{
+ int status = 0;
+ struct gatepeterson_object *handle = NULL;
+ struct gatepeterson_obj *obj = NULL;
+ u32 len;
+ u32 shm_index;
+ u32 shared_shm_base;
+ s32 retval = 0;
+
+
+ handle = kmalloc(sizeof(struct gatepeterson_object), GFP_KERNEL);
+ if (handle == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ obj = kmalloc(sizeof(struct gatepeterson_obj), GFP_KERNEL);
+ if (obj == NULL) {
+ retval = -ENOMEM;
+ goto obj_alloc_fail;
+ }
+
+ if (likely(gatepeterson_state.cfg.use_nameserver == true &&
+ params->name != NULL)) {
+ len = strlen(params->name) + 1;
+ obj->params.name = kmalloc(len, GFP_KERNEL);
+ if (obj->params.name == NULL) {
+ retval = -ENOMEM;
+ goto name_alloc_fail;
+ }
+
+ if (create_flag == true) {
+ shm_index = sharedregion_get_index(
+ params->shared_addr);
+ shared_shm_base = (u32)sharedregion_get_srptr(
+ (void *)params->shared_addr,
+ shm_index);
+ obj->ns_key = nameserver_add_uint32(
+ gatepeterson_state.nshandle,
+ params->name,
+ (u32) (shared_shm_base));
+ if (obj->ns_key == NULL) {
+ status = -ENOMEM; /* FIXME */
+ goto ns_add32_fail;
+ }
+ }
+
+ }
+
+ handle->obj = obj;
+ handle->enter = &gatepeterson_enter;
+ handle->leave = &gatepeterson_leave;
+ handle->lock_get_knl_handle = &gatepeterson_get_knl_handle;
+ /* assign the memory with proper cache line padding */
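+ /* Layout in shared memory, one GATEPETERSON_CACHESIZE line per field
+ * (matching the four cache lines returned by
+ * gatepeterson_shared_memreq()):
+ * [ attrs | flag[0] | flag[1] | turn ]
+ */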
+ obj->attrs = (struct gatepeterson_attrs *) params->shared_addr;
+ obj->flag[0] = ((void *)(((u32) obj->attrs) +
+ GATEPETERSON_CACHESIZE));
+ obj->flag[1] = ((void *)(((u32) obj->flag[0]) +
+ GATEPETERSON_CACHESIZE));
+ obj->turn = ((void *)(((u32) obj->flag[1])
+ + GATEPETERSON_CACHESIZE)); /* TBD: Fixme */
+
+ /* Creator always has selfid set to 0 */
+ if (create_flag == true) {
+ obj->self_id = 0;
+ obj->other_id = 1;
+ obj->attrs->creator_proc_id = multiproc_get_id(NULL);
+ obj->attrs->opener_proc_id = MULTIPROC_INVALIDID;
+ obj->attrs->status = GATEPETERSON_CREATED;
+ obj->attrs->version = GATEPETERSON_VERSION;
+
+ /* Set up shared memory */
+ *(obj->turn) = 0x0;
+ *(obj->flag[0]) = 0x0;
+ *(obj->flag[1]) = 0x0;
+ obj->ref_count = 0;
+ } else {
+ obj->self_id = 1;
+ obj->other_id = 0;
+ obj->attrs->opener_proc_id = multiproc_get_id(NULL);
+ obj->ref_count = 1;
+ }
+ obj->nested = 0;
+ obj->top = handle;
+
+ /* Populate the params member */
+ memcpy(&obj->params, params, sizeof(struct gatepeterson_params));
+
+ /* Create the local lock if not provided */
+ if (likely(params->local_protection == GATEPETERSON_PROTECT_DEFAULT))
+ obj->params.local_protection =
+ gatepeterson_state.cfg.default_protection;
+ else
+ obj->params.local_protection = params->local_protection;
+
+ switch (obj->params.local_protection) {
+ case GATEPETERSON_PROTECT_NONE: /* Fall through */
+ obj->local_gate = NULL; /* TBD: Fixme */
+ break;
+ /* In syslink, a gatespinlock (which internally uses a mutex) is used
+ * for interrupt protection, so a mutex is used for interrupt
+ * protection here as well
+ */
+ case GATEPETERSON_PROTECT_INTERRUPT: /* Fall through */
+ /* FIXME: Add a spinlock protection */
+ case GATEPETERSON_PROTECT_TASKLET: /* Fall through */
+ case GATEPETERSON_PROTECT_THREAD: /* Fall through */
+ case GATEPETERSON_PROTECT_PROCESS:
+ obj->local_gate = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (obj->local_gate == NULL) {
+ retval = -ENOMEM;
+ goto gate_create_fail;
+ }
+
+ mutex_init(obj->local_gate);
+ break;
+ default:
+ /* An invalid protection level was supplied, FIXME */
+ obj->local_gate = NULL;
+ break;
+ }
+
+ /* Put in the local list */
+ retval = mutex_lock_interruptible(gatepeterson_state.mod_lock);
+ if (retval)
+ goto mod_lock_fail;
+
+ list_add_tail(&obj->elem, &gatepeterson_state.obj_list);
+ mutex_unlock(gatepeterson_state.mod_lock);
+ return (void *)handle;
+
+mod_lock_fail:
+ kfree(obj->local_gate);
+
+gate_create_fail:
+ status = nameserver_remove_entry(gatepeterson_state.nshandle,
+ obj->ns_key);
+
+ns_add32_fail:
+ kfree(obj->params.name);
+
+name_alloc_fail:
+ kfree(obj);
+
+obj_alloc_fail:
+ kfree(handle);
+ handle = NULL;
+
+exit:
+ if (create_flag == true)
+ printk(KERN_ERR "_gatepeterson_create (create) failed status: %x\n",
+ retval);
+ else
+ printk(KERN_ERR "_gatepeterson_create (open) failed status: %x\n",
+ retval);
+
+ return NULL;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/gatepeterson_ioctl.c b/drivers/dsp/syslink/multicore_ipc/gatepeterson_ioctl.c
new file mode 100644
index 000000000000..f0d34d1084c8
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/gatepeterson_ioctl.c
@@ -0,0 +1,392 @@
+/*
+ * gatepeterson_ioctl.c
+ *
+ * The Gate Peterson Algorithm for mutual exclusion of shared memory.
+ * Current implementation works for 2 processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <gatepeterson.h>
+#include <gatepeterson_ioctl.h>
+#include <sharedregion.h>
+
+/*
+ * ======== gatepeterson_ioctl_get_config ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_get_config function
+ */
+static int gatepeterson_ioctl_get_config(struct gatepeterson_cmd_args *cargs)
+{
+ struct gatepeterson_config config;
+ s32 status = 0;
+ s32 size;
+
+ gatepeterson_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct gatepeterson_config));
+ if (size)
+ status = -EFAULT;
+
+ cargs->api_status = 0;
+ return status;
+}
+
+/*
+ * ======== gatepeterson_ioctl_setup ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_setup function
+ */
+static int gatepeterson_ioctl_setup(struct gatepeterson_cmd_args *cargs)
+{
+ struct gatepeterson_config config;
+ s32 status = 0;
+ s32 size;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct gatepeterson_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = gatepeterson_setup(&config);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== gatepeterson_ioctl_destroy ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_destroy function
+ */
+static int gatepeterson_ioctl_destroy(
+ struct gatepeterson_cmd_args *cargs)
+{
+ cargs->api_status = gatepeterson_destroy();
+ return 0;
+}
+
+/*
+ * ======== gatepeterson_ioctl_params_init ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_params_init function
+ */
+static int gatepeterson_ioctl_params_init(struct gatepeterson_cmd_args *cargs)
+{
+ struct gatepeterson_params params;
+ s32 status = 0;
+ s32 size;
+
+ gatepeterson_params_init(cargs->args.params_init.handle,
+ &params);
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct gatepeterson_params));
+ if (size)
+ status = -EFAULT;
+
+ cargs->api_status = 0;
+ return status;
+}
+
+/*
+ * ======== gatepeterson_ioctl_create ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_create function
+ */
+static int gatepeterson_ioctl_create(struct gatepeterson_cmd_args *cargs)
+{
+ struct gatepeterson_params params;
+ void *handle = NULL;
+ s32 status = 0;
+ s32 size;
+
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct gatepeterson_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (cargs->args.create.name_len > 0) {
+ params.name = kmalloc(cargs->args.create.name_len + 1,
+ GFP_KERNEL);
+ if (params.name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ params.name[cargs->args.create.name_len] = '\0';
+ size = copy_from_user(params.name,
+ cargs->args.create.params->name,
+ cargs->args.create.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ }
+
+ params.shared_addr = sharedregion_get_ptr(
+ (u32 *)cargs->args.create.shared_addr_srptr);
+ handle = gatepeterson_create(&params);
+ /* Here we are not validating the return from the module.
+ Even if it is NULL, we pass it to the user, and the user has to
+ pass a proper return value back to the application
+ */
+ cargs->args.create.handle = handle;
+ cargs->api_status = 0;
+
+name_from_usr_error:
+ if (cargs->args.create.name_len > 0)
+ kfree(params.name);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== gatepeterson_ioctl_delete ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_delete function
+ */
+static int gatepeterson_ioctl_delete(struct gatepeterson_cmd_args *cargs)
+
+{
+ cargs->api_status = gatepeterson_delete(&cargs->args.delete.handle);
+ return 0;
+}
+
+/*
+ * ======== gatepeterson_ioctl_open ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_open function
+ */
+static int gatepeterson_ioctl_open(struct gatepeterson_cmd_args *cargs)
+{
+ struct gatepeterson_params params;
+ void *handle = NULL;
+ s32 status = 0;
+ s32 size;
+
+ size = copy_from_user(&params, cargs->args.open.params,
+ sizeof(struct gatepeterson_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (cargs->args.open.name_len > 0) {
+ params.name = kmalloc(cargs->args.open.name_len + 1,
+ GFP_KERNEL);
+ if (params.name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ params.name[cargs->args.open.name_len] = '\0';
+ size = copy_from_user(params.name,
+ cargs->args.open.params->name,
+ cargs->args.open.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+ }
+
+ /* For open by name, the shared_add_srptr may be invalid */
+ if (cargs->args.open.shared_addr_srptr != \
+ (u32) SHAREDREGION_INVALIDSRPTR) {
+ params.shared_addr = sharedregion_get_ptr(
+ (u32 *)cargs->args.open.shared_addr_srptr);
+ }
+ cargs->api_status = gatepeterson_open(&handle, &params);
+ cargs->args.open.handle = handle;
+
+name_from_usr_error:
+ if (cargs->args.open.name_len > 0)
+ kfree(params.name);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== gatepeterson_ioctl_close ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_close function
+ */
+static int gatepeterson_ioctl_close(struct gatepeterson_cmd_args *cargs)
+{
+ cargs->api_status = gatepeterson_close(&cargs->args.close.handle);
+ return 0;
+}
+
+/*
+ * ======== gatepeterson_ioctl_enter ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_enter function
+ */
+static int gatepeterson_ioctl_enter(struct gatepeterson_cmd_args *cargs)
+{
+ cargs->api_status = gatepeterson_enter(cargs->args.enter.handle);
+ return 0;
+}
+
+/*
+ * ======== gatepeterson_ioctl_leave ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_leave function
+ */
+static int gatepeterson_ioctl_leave(struct gatepeterson_cmd_args *cargs)
+{
+ gatepeterson_leave(cargs->args.enter.handle,
+ cargs->args.enter.flags);
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== gatepeterson_ioctl_shared_memreq ========
+ * Purpose:
+ * This is the ioctl interface to the gatepeterson_shared_memreq function
+ */
+static int gatepeterson_ioctl_shared_memreq(struct gatepeterson_cmd_args *cargs)
+{
+ struct gatepeterson_params params;
+ s32 status = 0;
+ s32 size;
+
+
+ size = copy_from_user(&params, cargs->args.shared_memreq.params,
+ sizeof(struct gatepeterson_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->args.shared_memreq.bytes =
+ gatepeterson_shared_memreq(cargs->args.shared_memreq.params);
+ cargs->api_status = 0;
+
+exit:
+ return status;
+}
+
+/*
+ * ======== gatepeterson_ioctl ========
+ * Purpose:
+ * This is the ioctl interface for the gatepeterson module
+ */
+int gatepeterson_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct gatepeterson_cmd_args __user *uarg =
+ (struct gatepeterson_cmd_args __user *)args;
+ struct gatepeterson_cmd_args cargs;
+
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct gatepeterson_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_GATEPETERSON_GETCONFIG:
+ status = gatepeterson_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_GATEPETERSON_SETUP:
+ status = gatepeterson_ioctl_setup(&cargs);
+ break;
+
+ case CMD_GATEPETERSON_DESTROY:
+ status = gatepeterson_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_GATEPETERSON_PARAMS_INIT:
+ status = gatepeterson_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_GATEPETERSON_CREATE:
+ status = gatepeterson_ioctl_create(&cargs);
+ break;
+
+ case CMD_GATEPETERSON_DELETE:
+ status = gatepeterson_ioctl_delete(&cargs);
+ break;
+
+ case CMD_GATEPETERSON_OPEN:
+ status = gatepeterson_ioctl_open(&cargs);
+ break;
+
+ case CMD_GATEPETERSON_CLOSE:
+ status = gatepeterson_ioctl_close(&cargs);
+ break;
+
+ case CMD_GATEPETERSON_ENTER:
+ status = gatepeterson_ioctl_enter(&cargs);
+ break;
+
+ case CMD_GATEPETERSON_LEAVE:
+ status = gatepeterson_ioctl_leave(&cargs);
+ break;
+
+ case CMD_GATEPETERSON_SHAREDMEMREQ:
+ status = gatepeterson_ioctl_shared_memreq(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ status = -ERESTARTSYS;
+
+ if (status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs,
+ sizeof(struct gatepeterson_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+}
+
diff --git a/drivers/dsp/syslink/multicore_ipc/heap.c b/drivers/dsp/syslink/multicore_ipc/heap.c
new file mode 100755
index 000000000000..11df26c11c88
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/heap.c
@@ -0,0 +1,101 @@
+/*
+ * heap.c
+ *
+ * Heap module manages fixed size buffers that can be used
+ * in a multiprocessor system with shared memory
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+#include <linux/types.h>
+#include <linux/bug.h>
+
+
+#include <heap.h>
+
+
+/*
+ * ======== heap_alloc ========
+ * Purpose:
+ * This will allocate a block of memory of specified
+ * size
+ */
+void *heap_alloc(void *hphandle, u32 size, u32 align)
+{
+ char *block = NULL;
+ struct heap_object *obj = NULL;
+
+ BUG_ON(hphandle == NULL);
+
+ obj = (struct heap_object *)hphandle;
+ BUG_ON(obj->alloc == NULL);
+ block = obj->alloc(hphandle, size, align);
+ return block;
+}
+
+/*
+ * ======== heap_free ========
+ * Purpose:
+ * This will free a block of memory allocated
+ * from the heap
+ */
+int heap_free(void *hphandle, void *block, u32 size)
+{
+ struct heap_object *obj = NULL;
+ s32 retval = 0;
+
+ BUG_ON(hphandle == NULL);
+
+ obj = (struct heap_object *)hphandle;
+ BUG_ON(obj->free == NULL);
+ retval = obj->free(hphandle, block, size);
+ return retval;
+}
+
+/*
+ * ======== heap_get_stats ========
+ * Purpose:
+ * This will get the heap memory statistics
+ */
+int heap_get_stats(void *hphandle, struct memory_stats *stats)
+{
+ struct heap_object *obj = NULL;
+ s32 retval = 0;
+
+ BUG_ON(hphandle == NULL);
+ BUG_ON(stats == NULL);
+
+ obj = (struct heap_object *)hphandle;
+ BUG_ON(obj->get_stats == NULL);
+ retval = obj->get_stats(hphandle, stats);
+ return retval;
+}
+
+/*
+ * ======== heap_get_extended_stats ========
+ * Purpose:
+ * This will get the heap memory extended statistics
+ */
+int heap_get_extended_stats(void *hphandle,
+ struct heap_extended_stats *stats)
+{
+ struct heap_object *obj = NULL;
+ s32 retval = 0;
+
+ BUG_ON(hphandle == NULL);
+ BUG_ON(stats == NULL);
+
+ obj = (struct heap_object *)hphandle;
+ BUG_ON(obj->get_extended_stats == NULL);
+ retval = obj->get_extended_stats(hphandle, stats);
+ return retval;
+}
+
diff --git a/drivers/dsp/syslink/multicore_ipc/heapbuf.c b/drivers/dsp/syslink/multicore_ipc/heapbuf.c
new file mode 100755
index 000000000000..e7d75f640976
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/heapbuf.c
@@ -0,0 +1,1173 @@
+/*
+ * heapbuf.c
+ *
+ * Heap module manages fixed size buffers that can be used
+ * in a multiprocessor system with shared memory.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <atomic_linux.h>
+#include <multiproc.h>
+#include <nameserver.h>
+#include <sharedregion.h>
+#include <gatepeterson.h>
+#include <heapbuf.h>
+#include <listmp.h>
+#include <listmp_sharedmemory.h>
+
+/*
+ * Name of the reserved nameserver used for heapbuf.
+ */
+#define HEAPBUF_NAMESERVER "HeapBuf"
+#define HEAPBUF_MAX_NAME_LEN 32
+#define HEAPBUF_CACHESIZE 128
+/* Macro to make a correct module magic number with ref_count */
+#define HEAPBUF_MAKE_MAGICSTAMP(x) ((HEAPBUF_MODULEID << 12) | (x))
+
+
+/*
+ * Structure defining attribute parameters for the heapbuf module
+ */
+struct heapbuf_attrs {
+ VOLATILE u32 version;
+ VOLATILE u32 status;
+ VOLATILE u32 num_free_blocks;
+ VOLATILE u32 min_free_blocks;
+ VOLATILE u32 block_size;
+ VOLATILE u32 align;
+ VOLATILE u32 num_blocks;
+ VOLATILE u32 buf_size;
+ VOLATILE char *buf;
+};
+
+/*
+ * Structure defining processor related information for the
+ * heapbuf module
+ */
+struct heapbuf_proc_attrs {
+ bool creator; /* Creator or opener */
+ u16 proc_id; /* Processor identifier */
+ u32 open_count; /* open count in a processor */
+};
+
+/*
+ * Structure for heapbuf module state
+ */
+struct heapbuf_module_object {
+ atomic_t ref_count; /* Reference count */
+ void *ns_handle;
+ struct list_head obj_list; /* List holding created objects */
+ struct mutex *local_lock; /* lock for protecting obj_list */
+ struct heapbuf_config cfg;
+ struct heapbuf_config default_cfg; /* Default config values */
+ struct heapbuf_params default_inst_params; /* Default instance
+ creation parameters */
+};
+
+struct heapbuf_module_object heapbuf_state = {
+ .obj_list = LIST_HEAD_INIT(heapbuf_state.obj_list),
+ .default_cfg.max_name_len = HEAPBUF_MAX_NAME_LEN,
+ .default_cfg.use_nameserver = true,
+ .default_cfg.track_max_allocs = false,
+ .default_inst_params.gate = NULL,
+ .default_inst_params.exact = false,
+ .default_inst_params.name = NULL,
+ .default_inst_params.resource_id = 0,
+ .default_inst_params.cache_flag = false,
+ .default_inst_params.align = 1,
+ .default_inst_params.num_blocks = 0,
+ .default_inst_params.block_size = 0,
+ .default_inst_params.shared_addr = NULL,
+ .default_inst_params.shared_addr_size = 0,
+ .default_inst_params.shared_buf = NULL,
+ .default_inst_params.shared_buf_size = 0
+};
+
+/*
+ * Structure for the handle for the heapbuf
+ */
+struct heapbuf_obj {
+ struct list_head list_elem; /* Used for creating a linked list */
+ struct heapbuf_params params; /* The creation parameter structure */
+ struct heapbuf_attrs *attrs; /* The shared attributes structure */
+ void *free_list; /* List of free buffers */
+ struct mutex *gate; /* Lock used for critical region management */
+ void *ns_key; /* nameserver key required for remove */
+ struct heapbuf_proc_attrs owner; /* owner processor info */
+ void *top; /* Pointer to the top object */
+ bool cacheFlag; /* added for future use */
+};
+
+/*
+ * ======== heapbuf_get_config ========
+ * Purpose:
+ * This will get default configuration for the
+ * heapbuf module
+ */
+int heapbuf_get_config(struct heapbuf_config *cfgparams)
+{
+ BUG_ON(cfgparams == NULL);
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true)
+ memcpy(cfgparams, &heapbuf_state.default_cfg,
+ sizeof(struct heapbuf_config));
+ else
+ memcpy(cfgparams, &heapbuf_state.cfg,
+ sizeof(struct heapbuf_config));
+ return 0;
+}
+EXPORT_SYMBOL(heapbuf_get_config);
+
+/*
+ * ======== heapbuf_setup ========
+ * Purpose:
+ * This will setup the heapbuf module
+ *
+ * This function sets up the HeapBuf module. This function
+ * must be called before any other instance-level APIs can be
+ * invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then heapbuf_get_config can be called to get
+ * the configuration filled with the default values. After this,
+ * only the required configuration values can be changed. If the
+ * user does not wish to make any change in the default parameters,
+ * the application can simply call heapbuf_setup with NULL
+ * parameters. The default parameters are then used automatically.
+ */
+int heapbuf_setup(const struct heapbuf_config *cfg)
+{
+ struct nameserver_params params;
+ struct heapbuf_config tmp_cfg;
+ void *ns_handle = NULL;
+ s32 retval = 0;
+
+	/* This initializes the ref_count variable if it is not already
+	 * initialized; the upper bits carry the module ID so that the
+	 * correctness of the ref_count value can be checked.
+	 */
+ atomic_cmpmask_and_set(&heapbuf_state.ref_count,
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&heapbuf_state.ref_count)
+ != HEAPBUF_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ heapbuf_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ if (cfg->max_name_len == 0 ||
+ cfg->max_name_len > HEAPBUF_MAX_NAME_LEN) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ heapbuf_state.local_lock = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (heapbuf_state.local_lock == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ if (likely((cfg->use_nameserver == true))) {
+ retval = nameserver_get_params(NULL, &params);
+ params.max_value_len = sizeof(u32);
+ params.max_name_len = cfg->max_name_len;
+ ns_handle = nameserver_create(HEAPBUF_NAMESERVER, &params);
+ if (ns_handle == NULL) {
+ retval = -EFAULT;
+ goto ns_create_fail;
+ }
+ heapbuf_state.ns_handle = ns_handle;
+ }
+
+ memcpy(&heapbuf_state.cfg, cfg, sizeof(struct heapbuf_config));
+ mutex_init(heapbuf_state.local_lock);
+ return 0;
+
+ns_create_fail:
+ kfree(heapbuf_state.local_lock);
+
+error:
+ atomic_set(&heapbuf_state.ref_count,
+ HEAPBUF_MAKE_MAGICSTAMP(0));
+
+ printk(KERN_ERR "heapbuf_setup failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbuf_setup);
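+
+/*
+ * Minimal setup sketch (illustrative only; error handling trimmed).  It
+ * relies only on heapbuf_get_config()/heapbuf_setup() above; passing NULL
+ * to heapbuf_setup() would apply the defaults directly instead:
+ *
+ *	struct heapbuf_config cfg;
+ *
+ *	heapbuf_get_config(&cfg);
+ *	cfg.max_name_len = 16;
+ *	if (heapbuf_setup(&cfg) < 0)
+ *		return -1;
+ */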
+
+/*
+ * ======== heapbuf_destroy ========
+ * Purpose:
+ * This will destroy the heapbuf module
+ */
+int heapbuf_destroy(void)
+{
+ s32 retval = 0;
+ struct mutex *lock = NULL;
+ struct heapbuf_obj *obj = NULL;
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (atomic_dec_return(&heapbuf_state.ref_count)
+ == HEAPBUF_MAKE_MAGICSTAMP(0)) {
+ /* Temporarily increment ref_count here. */
+ atomic_set(&heapbuf_state.ref_count,
+ HEAPBUF_MAKE_MAGICSTAMP(1));
+
+ /* Check if any heapbuf instances have not been deleted/closed
+		 * so far. If there are any, delete or close them.
+ */
+ list_for_each_entry(obj, &heapbuf_state.obj_list, list_elem) {
+ if (obj->owner.proc_id == multiproc_get_id(NULL))
+ heapbuf_delete(&obj->top);
+ else
+ heapbuf_close(obj->top);
+
+ if (list_empty(&heapbuf_state.obj_list))
+ break;
+ }
+
+ /* Again reset ref_count. */
+ atomic_set(&heapbuf_state.ref_count,
+ HEAPBUF_MAKE_MAGICSTAMP(0));
+
+ if (likely(heapbuf_state.cfg.use_nameserver == true)) {
+ retval = nameserver_delete(&heapbuf_state.ns_handle);
+ if (unlikely(retval != 0))
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(heapbuf_state.local_lock);
+ if (retval)
+ goto error;
+
+ lock = heapbuf_state.local_lock;
+ heapbuf_state.local_lock = NULL;
+ mutex_unlock(lock);
+ kfree(lock);
+		memset(&heapbuf_state.cfg, 0, sizeof(struct heapbuf_config));
+
+ atomic_set(&heapbuf_state.ref_count,
+ HEAPBUF_MAKE_MAGICSTAMP(0));
+ }
+
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbuf_destroy failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbuf_destroy);
+
+/*
+ * ======== heapbuf_params_init ========
+ * Purpose:
+ * This will get the initialization params for a heapbuf
+ * module instance
+ */
+void heapbuf_params_init(void *handle,
+ struct heapbuf_params *params)
+{
+ struct heapbuf_obj *obj = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ BUG_ON(params == NULL);
+
+ if (handle == NULL)
+ memcpy(params, &heapbuf_state.default_inst_params,
+ sizeof(struct heapbuf_params));
+ else {
+ obj = (struct heapbuf_obj *)handle;
+ memcpy(params, (void *)&obj->params,
+ sizeof(struct heapbuf_params));
+ }
+ return;
+error:
+ printk(KERN_ERR "heapbuf_params_init failed status: %x\n", retval);
+}
+EXPORT_SYMBOL(heapbuf_params_init);
+
+/*
+ * ======== _heapbuf_create ========
+ * Purpose:
+ * This will create a new instance of heapbuf module
+ * This is an internal function as both heapbuf_create
+ * and heapbuf_open use the functionality
+ *
+ * NOTE: The lock to protect the shared memory area
+ * used by heapbuf is provided by the consumer of
+ * heapbuf module
+ */
+int _heapbuf_create(void **handle_ptr, const struct heapbuf_params *params,
+ u32 create_flag)
+{
+ struct heap_object *handle = NULL;
+ struct heapbuf_obj *obj = NULL;
+ char *buf = NULL;
+ listmp_sharedmemory_params listmp_params;
+ s32 retval = 0;
+ u32 i;
+ s32 align;
+ s32 shm_index;
+ u32 shared_shm_base;
+ void *entry = NULL;
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ BUG_ON(handle_ptr == NULL);
+
+ BUG_ON(params == NULL);
+
+ /* No need for parameter checks, since this is an internal function. */
+
+ /* Initialize return parameter. */
+ *handle_ptr = NULL;
+
+ handle = kmalloc(sizeof(struct heap_object), GFP_KERNEL);
+ if (handle == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+	obj = kzalloc(sizeof(struct heapbuf_obj), GFP_KERNEL);
+ if (obj == NULL) {
+ retval = -ENOMEM;
+ goto obj_alloc_error;
+ }
+
+ handle->obj = (struct heapbuf_obj *)obj;
+ handle->alloc = &heapbuf_alloc;
+ handle->free = &heapbuf_free;
+ handle->get_stats = &heapbuf_get_stats;
+ /* FIXME: handle->is_blocking = &heapbuf_isblocking; */
+ /* Create the shared list */
+ listmp_sharedmemory_params_init(NULL, &listmp_params);
+ listmp_params.shared_addr = (u32 *)((u32) (params->shared_addr)
+ + ((sizeof(struct heapbuf_attrs)
+ + (HEAPBUF_CACHESIZE - 1))
+ & ~(HEAPBUF_CACHESIZE - 1)));
+ listmp_params.shared_addr_size =
+ listmp_sharedmemory_shared_memreq(&listmp_params);
+ listmp_params.gate = NULL;
+ /* Assign the memory with proper cache line padding */
+ obj->attrs = (struct heapbuf_attrs *) params->shared_addr;
+
+ if (create_flag == false)
+ listmp_sharedmemory_open(&obj->free_list, &listmp_params);
+ else {
+ obj->free_list = listmp_sharedmemory_create(&listmp_params);
+
+ if (obj->free_list == NULL) {
+ retval = -ENOMEM;
+ goto listmp_error;
+ }
+
+ obj->attrs->version = HEAPBUF_VERSION;
+ obj->attrs->num_free_blocks = params->num_blocks;
+ obj->attrs->min_free_blocks = params->num_blocks;
+ obj->attrs->block_size = params->block_size;
+ obj->attrs->align = params->align;
+ obj->attrs->num_blocks = params->num_blocks;
+ obj->attrs->buf_size = params->shared_buf_size;
+ buf = params->shared_buf;
+ align = obj->attrs->align;
+ buf = (char *)(((u32)buf + (align - 1)) & ~(align - 1));
+ obj->attrs->buf = buf;
+
+ /*
+		 * Split the buffer into blocks of length "block_size"
+		 * and add them to the free_list queue
+ */
+ for (i = 0; i < obj->attrs->num_blocks; i++) {
+ listmp_put_tail((struct listmp_object *)
+ obj->free_list,
+ (struct listmp_elem *)buf);
+ buf += obj->attrs->block_size;
+ }
+ }
+
+ obj->gate = params->gate;
+
+ /* Populate the params member */
+ memcpy(&obj->params, params, sizeof(struct heapbuf_params));
+ if (params->name != NULL) {
+ obj->params.name = kmalloc(strlen(params->name) + 1,
+ GFP_KERNEL);
+ if (obj->params.name == NULL) {
+ retval = -ENOMEM;
+ goto name_alloc_error;
+ }
+ strncpy(obj->params.name, params->name,
+ strlen(params->name) + 1);
+ }
+
+ if (create_flag == true) {
+ obj->owner.creator = true;
+ obj->owner.open_count = 1;
+ obj->owner.proc_id = multiproc_get_id(NULL);
+ obj->top = handle;
+ obj->attrs->status = HEAPBUF_CREATED;
+ } else {
+ obj->owner.creator = false;
+ obj->owner.open_count = 0;
+ obj->owner.proc_id = MULTIPROC_INVALIDID;
+ obj->top = handle;
+ }
+
+ retval = mutex_lock_interruptible(heapbuf_state.local_lock);
+ if (retval)
+ goto lock_error;
+
+ INIT_LIST_HEAD(&obj->list_elem);
+ list_add_tail(&obj->list_elem, &heapbuf_state.obj_list);
+ mutex_unlock(heapbuf_state.local_lock);
+
+ if ((likely(heapbuf_state.cfg.use_nameserver == true))
+ && (create_flag == true)) {
+ /* We will store a shared pointer in the nameserver */
+ shm_index = sharedregion_get_index(params->shared_addr);
+ shared_shm_base = (u32)sharedregion_get_srptr(
+ params->shared_addr, shm_index);
+ if (obj->params.name != NULL) {
+ entry = nameserver_add_uint32(heapbuf_state.ns_handle,
+ params->name,
+ (u32)(shared_shm_base));
+ if (entry == NULL) {
+ retval = -EFAULT;
+ goto ns_add_error;
+ }
+ }
+ }
+
+ *handle_ptr = (void *)handle;
+ return retval;
+
+ns_add_error:
+ retval = mutex_lock_interruptible(heapbuf_state.local_lock);
+ list_del(&obj->list_elem);
+ mutex_unlock(heapbuf_state.local_lock);
+
+lock_error:
+ if (obj->params.name != NULL) {
+ if (obj->ns_key != NULL) {
+ nameserver_remove_entry(heapbuf_state.ns_handle,
+ obj->ns_key);
+ obj->ns_key = NULL;
+ }
+ kfree(obj->params.name);
+ }
+
+name_alloc_error: /* Fall through */
+ if (create_flag == true)
+ listmp_sharedmemory_delete((listmp_sharedmemory_handle *)
+ &obj->free_list);
+ else
+ listmp_sharedmemory_close((listmp_sharedmemory_handle *)
+ &obj->free_list);
+
+listmp_error:
+ kfree(obj);
+
+obj_alloc_error:
+ kfree(handle);
+
+error:
+ printk(KERN_ERR "_heapbuf_create failed status: %x\n", retval);
+ return retval;
+}
+
+/*
+ * ======== heapbuf_create ========
+ * Purpose:
+ * This will create a new instance of heapbuf module
+ */
+void *heapbuf_create(const struct heapbuf_params *params)
+{
+ s32 retval = 0;
+ void *handle = NULL;
+ u32 buf_size;
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ BUG_ON(params == NULL);
+
+ if ((params->shared_addr) == NULL ||
+ params->shared_buf == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if ((params->shared_addr_size)
+ < heapbuf_shared_memreq(params, &buf_size)) {
+ /* if Shared memory size is less than required */
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (params->shared_buf_size < buf_size) {
+ /* if shared memory size is less than required */
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = _heapbuf_create((void **)&handle, params, true);
+ if (retval < 0)
+ goto error;
+
+ return (void *)handle;
+
+error:
+ printk(KERN_ERR "heapbuf_create failed status: %x\n", retval);
+ return (void *)handle;
+}
+EXPORT_SYMBOL(heapbuf_create);
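+
+/*
+ * Creation sketch (illustrative; shm_base stands for a caller-provided
+ * shared-region address and is not defined in this file).  The state
+ * area and the block buffer are carved out of shared memory using
+ * heapbuf_shared_memreq() below:
+ *
+ *	struct heapbuf_params params;
+ *	u32 buf_size, state_size;
+ *	void *heap;
+ *
+ *	heapbuf_params_init(NULL, &params);
+ *	params.name = "myheap";
+ *	params.num_blocks = 8;
+ *	params.block_size = 256;
+ *	params.align = 128;
+ *	state_size = heapbuf_shared_memreq(&params, &buf_size);
+ *	params.shared_addr = shm_base;
+ *	params.shared_addr_size = state_size;
+ *	params.shared_buf = (char *)shm_base + state_size;
+ *	params.shared_buf_size = buf_size;
+ *	heap = heapbuf_create(&params);
+ */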
+
+/*
+ * ======== heapbuf_delete ========
+ * Purpose:
+ * This will delete an instance of heapbuf module
+ */
+int heapbuf_delete(void **handle_ptr)
+{
+ int status = 0;
+ struct heap_object *handle = NULL;
+ struct heapbuf_obj *obj = NULL;
+ struct heapbuf_params *params = NULL;
+ s32 retval = 0;
+ u16 myproc_id;
+
+ BUG_ON(handle_ptr == NULL);
+ handle = (struct heap_object *)(*handle_ptr);
+ if (WARN_ON(handle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ obj = (struct heapbuf_obj *)handle->obj;
+ if (obj == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ myproc_id = multiproc_get_id(NULL);
+
+ if (obj->owner.proc_id != myproc_id) {
+ retval = -EPERM;
+ goto error;
+ }
+
+ if (likely(obj->gate != NULL)) {
+ status = gatepeterson_enter(obj->gate);
+ if (status < 0) {
+ retval = -EINVAL;
+ goto gate_error;
+ }
+ }
+
+ if (obj->owner.open_count > 1) {
+ retval = -EBUSY;
+		goto device_busy_error;
+ }
+
+ if (obj->owner.open_count != 1) {
+ retval = -EBUSY;
+		goto device_busy_error;
+ }
+
+ retval = mutex_lock_interruptible(heapbuf_state.local_lock);
+ if (retval)
+ goto local_lock_error;
+
+ list_del(&obj->list_elem);
+ mutex_unlock(heapbuf_state.local_lock);
+ params = (struct heapbuf_params *) &obj->params;
+ if (likely(params->name != NULL)) {
+ if (likely(heapbuf_state.cfg.use_nameserver == true)) {
+ retval = nameserver_remove(heapbuf_state.ns_handle,
+ params->name);
+ if (retval != 0)
+ goto ns_remove_error;
+ obj->ns_key = NULL;
+ }
+ kfree(params->name);
+ }
+
+ if (likely(obj->gate != NULL))
+ gatepeterson_leave(obj->gate, 0);
+ retval = listmp_sharedmemory_delete(&obj->free_list);
+ kfree(obj);
+ kfree(handle);
+ *handle_ptr = NULL;
+ return 0;
+
+ns_remove_error: /* Fall through */
+gate_error: /* Fall through */
+local_lock_error: /* Fall through */
+device_busy_error:
+ if (likely(obj->gate != NULL))
+ gatepeterson_leave(obj->gate, 0);
+
+error:
+ printk(KERN_ERR "heapbuf_delete failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbuf_delete);
+
+/*
+ * ======== heapbuf_open ========
+ * Purpose:
+ * This will open a created instance of the heapbuf
+ * module
+ */
+int heapbuf_open(void **handle_ptr,
+ struct heapbuf_params *params)
+{
+ struct heapbuf_obj *obj = NULL;
+ bool found = false;
+ s32 retval = 0;
+ u16 myproc_id;
+ u32 shared_shm_base;
+ struct heapbuf_attrs *attrs;
+
+ BUG_ON(handle_ptr == NULL);
+ BUG_ON(params == NULL);
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if ((heapbuf_state.cfg.use_nameserver == false)
+			&& (params->shared_addr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if ((heapbuf_state.cfg.use_nameserver == true)
+		&& (params->shared_addr == NULL)
+ && (params->name == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ myproc_id = multiproc_get_id(NULL);
+ list_for_each_entry(obj, &heapbuf_state.obj_list, list_elem) {
+ if (obj->params.shared_addr == params->shared_addr)
+ found = true;
+ else if (params->name != NULL) {
+ if (strcmp(obj->params.name, params->name) == 0)
+ found = true;
+ }
+
+ if (found == true) {
+ retval = mutex_lock_interruptible(
+ heapbuf_state.local_lock);
+ if (retval)
+ goto error;
+ if (obj->owner.proc_id == myproc_id)
+ obj->owner.open_count++;
+ *handle_ptr = obj->top;
+ mutex_unlock(heapbuf_state.local_lock);
+ }
+ }
+
+ if (likely(found == false)) {
+ if (unlikely(params->shared_addr == NULL)) {
+ if (likely(heapbuf_state.cfg.use_nameserver == true)) {
+ /* Find in name server */
+ retval = nameserver_get(heapbuf_state.ns_handle,
+ params->name,
+ &shared_shm_base,
+ sizeof(u32),
+ NULL);
+ if (retval < 0)
+ goto error;
+
+ /*
+ * Convert from shared region pointer
+ * to local address
+ */
+ params->shared_addr = sharedregion_get_ptr
+ (&shared_shm_base);
+ if (params->shared_addr == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+ }
+ }
+
+ attrs = (struct heapbuf_attrs *)(params->shared_addr);
+ if (unlikely(attrs->status != (HEAPBUF_CREATED)))
+ retval = -ENXIO; /* Not created */
+ else if (unlikely(attrs->version != (HEAPBUF_VERSION))) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = _heapbuf_create((void **)handle_ptr, params, false);
+ if (retval < 0)
+ goto error;
+
+ }
+
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbuf_open failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbuf_open);
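+
+/*
+ * Open-by-name sketch (illustrative).  With use_nameserver enabled, the
+ * opening processor only needs the creator's name; heapbuf_open() looks
+ * up the shared address through the NameServer entry added at create
+ * time:
+ *
+ *	struct heapbuf_params params;
+ *	void *heap = NULL;
+ *
+ *	heapbuf_params_init(NULL, &params);
+ *	params.name = "myheap";
+ *	if (heapbuf_open(&heap, &params) == 0)
+ *		use heap with heapbuf_alloc()/heapbuf_free();
+ */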
+
+/*
+ * ======== heapbuf_close ========
+ * Purpose:
+ * This will close a previously opened/created instance
+ * of the heapbuf module
+ */
+int heapbuf_close(void *handle_ptr)
+{
+ int status = 0;
+ struct heap_object *handle = NULL;
+ struct heapbuf_obj *obj = NULL;
+ struct heapbuf_params *params = NULL;
+ s32 retval = 0;
+ u16 myproc_id = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(handle_ptr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heap_object *)(handle_ptr);
+ obj = (struct heapbuf_obj *)handle->obj;
+
+ if (obj != NULL) {
+ retval = mutex_lock_interruptible(heapbuf_state.local_lock);
+ if (retval)
+ goto error;
+
+ myproc_id = multiproc_get_id(NULL);
+		/* release one open reference held by this processor */
+ if (obj->owner.proc_id == myproc_id) {
+ if (obj->owner.open_count > 1)
+ obj->owner.open_count--;
+ }
+
+ /* Check if HeapBuf is opened on same processor*/
+ if ((((struct heapbuf_obj *)obj)->owner.creator == false)
+ && (obj->owner.open_count == 0)) {
+ list_del(&obj->list_elem);
+
+ /* Take the local lock */
+ if (likely(obj->gate != NULL)) {
+ status = gatepeterson_enter(obj->gate);
+ if (status < 0) {
+ retval = -EINVAL;
+ goto error;
+ }
+ }
+
+ params = (struct heapbuf_params *)&obj->params;
+ if (likely((params->name) != NULL))
+ kfree(params->name); /* Free memory */
+
+ /* Release the local lock */
+ if (likely(obj->gate != NULL))
+ gatepeterson_leave(obj->gate, 0);
+
+ /* Delete the list */
+ listmp_sharedmemory_close((listmp_sharedmemory_handle *)
+ obj->free_list);
+ kfree(obj);
+ kfree(handle);
+ handle = NULL;
+ }
+ mutex_unlock(heapbuf_state.local_lock);
+ }
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbuf_close failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbuf_close);
+
+/*
+ * ======== heapbuf_alloc ========
+ * Purpose:
+ * This will allocate a block of memory
+ */
+void *heapbuf_alloc(void *hphandle, u32 size, u32 align)
+{
+ int status = 0;
+ struct heap_object *handle = NULL;
+ struct heapbuf_obj *obj = NULL;
+ char *block = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(hphandle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(size == 0)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heap_object *)(hphandle);
+ obj = (struct heapbuf_obj *)handle->obj;
+
+ if ((obj->params.exact == true)
+ && (size != obj->attrs->block_size)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (size > obj->attrs->block_size) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (likely(obj->gate != NULL)) {
+ status = gatepeterson_enter(obj->gate);
+ if (status < 0) {
+ retval = -EINVAL;
+ goto error;
+ }
+ }
+
+ block = listmp_get_head((void *)obj->free_list);
+ if (block == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ obj->attrs->num_free_blocks--;
+ /*
+	 * Keep track of the minimum number of free blocks for this heapbuf
+	 * if the user has set the config variable track_max_allocs to true.
+	 *
+	 * The minimum number of free blocks, 'min_free_blocks', will be used
+	 * to compute the "all time" maximum number of allocated blocks in
+	 * heapbuf_get_extended_stats().
+ */
+ if (heapbuf_state.cfg.track_max_allocs) {
+ if (obj->attrs->num_free_blocks < obj->attrs->min_free_blocks)
+ /* save the new minimum */
+ obj->attrs->min_free_blocks =
+ obj->attrs->num_free_blocks;
+ }
+
+ if (likely(obj->gate != NULL))
+ gatepeterson_leave(obj->gate, 0);
+ return block;
+error:
+ printk(KERN_ERR "heapbuf_alloc failed status: %x\n", retval);
+ return NULL;
+}
+EXPORT_SYMBOL(heapbuf_alloc);
+
+/*
+ * ======== heapbuf_free ========
+ * Purpose:
+ * This will free a block of memory
+ */
+int heapbuf_free(void *hphandle, void *block, u32 size)
+{
+ int status = 0;
+ struct heap_object *handle = NULL;
+ struct heapbuf_obj *obj = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(hphandle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(block == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heap_object *)(hphandle);
+ obj = (struct heapbuf_obj *)handle->obj;
+ if (likely(obj->gate != NULL)) {
+ status = gatepeterson_enter(obj->gate);
+ if (status < 0) {
+ retval = -EINVAL;
+ goto error;
+ }
+ }
+
+ retval = listmp_put_tail((void *)obj->free_list, block);
+ obj->attrs->num_free_blocks++;
+ if (likely(obj->gate != NULL))
+ gatepeterson_leave(obj->gate, 0);
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbuf_free failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbuf_free);
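+
+/*
+ * Alloc/free sketch (illustrative; heap is assumed to be a handle
+ * returned by heapbuf_create() or heapbuf_open()).  Blocks are fixed
+ * size, so the requested size must not exceed block_size and must equal
+ * it when the "exact" creation parameter is set:
+ *
+ *	void *block;
+ *
+ *	block = heapbuf_alloc(heap, 256, 0);
+ *	if (block != NULL) {
+ *		... use the 256-byte block ...
+ *		heapbuf_free(heap, block, 256);
+ *	}
+ */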
+
+/*
+ * ======== heapbuf_get_stats ========
+ * Purpose:
+ * This will get memory statistics
+ */
+int heapbuf_get_stats(void *hphandle, struct memory_stats *stats)
+{
+ int status = 0;
+ struct heap_object *object = NULL;
+ struct heapbuf_obj *obj = NULL;
+ u32 block_size;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ BUG_ON(stats == NULL);
+
+ if (WARN_ON(hphandle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ object = (struct heap_object *)(hphandle);
+ obj = (struct heapbuf_obj *)object->obj;
+
+ if (likely(obj->gate != NULL)) {
+ status = gatepeterson_enter(obj->gate);
+ if (status < 0) {
+ retval = -EINVAL;
+ goto error;
+ }
+ }
+
+ block_size = obj->attrs->block_size;
+ stats->total_size = (u32 *)(block_size * obj->attrs->num_blocks);
+ stats->total_free_size = (u32 *)(block_size *
+ obj->attrs->num_free_blocks);
+ if (obj->attrs->num_free_blocks)
+ stats->largest_free_size = (u32 *)block_size;
+ else
+ stats->largest_free_size = (u32 *)0;
+
+ if (likely(obj->gate != NULL))
+ gatepeterson_leave(obj->gate, 0);
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbuf_get_stats failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbuf_get_stats);
+
+/*
+ * ======== heapbuf_isblocking ========
+ * Purpose:
+ * Indicate whether the heap may block during an alloc or free call
+ */
+bool heapbuf_isblocking(void *handle)
+{
+ bool isblocking = false;
+ s32 retval = 0;
+
+ if (WARN_ON(handle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* TBD: Figure out how to determine whether the gate is blocking */
+ isblocking = true;
+
+	/* true: the heap blocks during alloc/free calls */
+	/* false: the heap does not block during alloc/free calls */
+ return isblocking;
+
+error:
+ printk(KERN_ERR "heapbuf_isblocking status: %x\n", retval);
+ return isblocking;
+}
+EXPORT_SYMBOL(heapbuf_isblocking);
+
+/*
+ * ======== heapbuf_get_extended_stats ========
+ * Purpose:
+ * This will get extended statistics
+ */
+int heapbuf_get_extended_stats(void *hphandle,
+ struct heapbuf_extended_stats *stats)
+{
+ int status = 0;
+ struct heap_object *object = NULL;
+ struct heapbuf_obj *obj = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapbuf_state.ref_count),
+ HEAPBUF_MAKE_MAGICSTAMP(0),
+ HEAPBUF_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ BUG_ON(stats == NULL);
+ if (WARN_ON(heapbuf_state.ns_handle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(hphandle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ object = (struct heap_object *)(hphandle);
+ obj = (struct heapbuf_obj *)object->obj;
+ if (likely(obj->gate != NULL)) {
+ status = gatepeterson_enter(obj->gate);
+ if (status < 0) {
+ retval = -EINVAL;
+ goto error;
+ }
+ }
+
+ /*
+	 * The maximum number of allocations for this heapbuf (at any given
+	 * instant during its lifetime) is computed as follows:
+ *
+ * max_allocated_blocks = obj->num_blocks - obj->min_free_blocks
+ *
+ * Note that max_allocated_blocks is *not* the maximum allocation count,
+ * but rather the maximum allocations seen at any snapshot of time in
+ * the heapbuf instance.
+ */
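+	/*
+	 * Worked example (illustrative): with num_blocks = 8 and
+	 * min_free_blocks = 3, max_allocated_blocks comes out as
+	 * 8 - 3 = 5, i.e. at most five blocks were ever held at once.
+	 */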
+ /* if nothing has been alloc'ed yet, return 0 */
+ if ((u32)(obj->attrs->min_free_blocks) == -1) /* FIX THIS */
+ stats->max_allocated_blocks = 0;
+ else
+ stats->max_allocated_blocks = obj->attrs->num_blocks
+ - obj->attrs->min_free_blocks;
+ /* current number of alloc'ed blocks is computed using curr # free
+ * blocks
+ */
+ stats->num_allocated_blocks = obj->attrs->num_blocks
+ - obj->attrs->num_free_blocks;
+ if (likely(obj->gate != NULL))
+ gatepeterson_leave(obj->gate, 0);
+
+error:
+ printk(KERN_ERR "heapbuf_get_extended_stats status: %x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbuf_get_extended_stats);
+
+/*
+ * ======== heapbuf_shared_memreq ========
+ * Purpose:
+ * This will get amount of shared memory required for
+ * creation of each instance
+ */
+int heapbuf_shared_memreq(const struct heapbuf_params *params, u32 *buf_size)
+{
+ int state_size = 0;
+ listmp_sharedmemory_params listmp_params;
+
+ BUG_ON(params == NULL);
+
+ /* Size for attrs */
+ state_size = (sizeof(struct heapbuf_attrs) + (HEAPBUF_CACHESIZE - 1))
+ & ~(HEAPBUF_CACHESIZE - 1);
+
+ listmp_params_init(NULL, &listmp_params);
+ listmp_params.resource_id = params->resource_id;
+ state_size += listmp_shared_memreq(&listmp_params);
+
+ /* Determine size for the buffer */
+ *buf_size = params->num_blocks * params->block_size;
+
+ return state_size;
+}
+EXPORT_SYMBOL(heapbuf_shared_memreq);
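+
+/*
+ * Sizing sketch (illustrative): for 8 blocks of 256 bytes the return
+ * value is the cache-aligned attrs size plus the listmp requirement,
+ * and the buf_size output is filled with 8 * 256 = 2048.  The caller
+ * therefore reserves return-value bytes at shared_addr and 2048 bytes
+ * at shared_buf before calling heapbuf_create().
+ */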
diff --git a/drivers/dsp/syslink/multicore_ipc/heapbuf_ioctl.c b/drivers/dsp/syslink/multicore_ipc/heapbuf_ioctl.c
new file mode 100755
index 000000000000..62928088fd65
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/heapbuf_ioctl.c
@@ -0,0 +1,485 @@
+/*
+ * heapbuf_ioctl.c
+ *
+ * Heap module manages fixed size buffers that can be used
+ * in a multiprocessor system with shared memory.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <heap.h>
+#include <heapbuf.h>
+#include <heapbuf_ioctl.h>
+#include <sharedregion.h>
+
+/*
+ * ======== heapbuf_ioctl_alloc ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_alloc function
+ */
+static int heapbuf_ioctl_alloc(struct heapbuf_cmd_args *cargs)
+{
+ u32 *block_srptr = SHAREDREGION_INVALIDSRPTR;
+ void *block;
+ s32 index;
+ s32 status = 0;
+
+ block = heapbuf_alloc(cargs->args.alloc.handle,
+ cargs->args.alloc.size,
+ cargs->args.alloc.align);
+ if (block != NULL) {
+ index = sharedregion_get_index(block);
+ block_srptr = sharedregion_get_srptr(block, index);
+ }
+	/*
+	 * On failure the call above returns a NULL pointer. That condition
+	 * is not checked here; whatever the heapbuf module returned is
+	 * passed back, so the ioctl succeeds even if the underlying
+	 * heapbuf call failed.
+	 */
+ cargs->args.alloc.block_srptr = block_srptr;
+ cargs->api_status = 0;
+ return status;
+}
+
+/*
+ * ======== heapbuf_ioctl_free ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_free function
+ */
+static int heapbuf_ioctl_free(struct heapbuf_cmd_args *cargs)
+{
+ char *block;
+
+ block = sharedregion_get_ptr(cargs->args.free.block_srptr);
+ cargs->api_status = heapbuf_free(cargs->args.free.handle, block,
+ cargs->args.free.size);
+ return 0;
+}
+
+/*
+ * ======== heapbuf_ioctl_params_init ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_params_init function
+ */
+static int heapbuf_ioctl_params_init(struct heapbuf_cmd_args *cargs)
+{
+ struct heapbuf_params params;
+ s32 status = 0;
+ u32 size;
+
+ heapbuf_params_init(cargs->args.params_init.handle, &params);
+ cargs->api_status = 0;
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct heapbuf_params));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== heapbuf_ioctl_create ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_create function
+ */
+static int heapbuf_ioctl_create(struct heapbuf_cmd_args *cargs)
+{
+ struct heapbuf_params params;
+ s32 status = 0;
+ u32 size;
+ void *handle = NULL;
+
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct heapbuf_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (cargs->args.create.name_len > 0) {
+		params.name = kmalloc(cargs->args.create.name_len + 1,
+							GFP_KERNEL);
+ if (params.name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ params.name[cargs->args.create.name_len] = '\0';
+ size = copy_from_user(params.name,
+ cargs->args.create.params->name,
+ cargs->args.create.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+ }
+
+ params.shared_addr = sharedregion_get_ptr((u32 *)
+ cargs->args.create.shared_addr_srptr);
+ params.shared_buf = sharedregion_get_ptr((u32 *)
+ cargs->args.create.shared_buf_srptr);
+ params.gate = cargs->args.create.knl_gate;
+ handle = heapbuf_create(&params);
+ cargs->args.create.handle = handle;
+ cargs->api_status = 0;
+
+name_from_usr_error:
+	if (cargs->args.create.name_len > 0)
+ kfree(params.name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== heapbuf_ioctl_delete ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_delete function
+ */
+static int heapbuf_ioctl_delete(struct heapbuf_cmd_args *cargs)
+{
+ cargs->api_status = heapbuf_delete(&cargs->args.delete.handle);
+ return 0;
+}
+
+/*
+ * ======== heapbuf_ioctl_open ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_open function
+ */
+static int heapbuf_ioctl_open(struct heapbuf_cmd_args *cargs)
+{
+ struct heapbuf_params params;
+ void *handle = NULL;
+ s32 status = 0;
+ ulong size;
+
+ size = copy_from_user(&params, cargs->args.open.params,
+ sizeof(struct heapbuf_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (cargs->args.open.name_len > 0) {
+		params.name = kmalloc(cargs->args.open.name_len + 1,
+							GFP_KERNEL);
+ if (params.name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+		params.name[cargs->args.open.name_len] = '\0';
+ size = copy_from_user(params.name,
+ cargs->args.open.params->name,
+ cargs->args.open.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto free_name;
+ }
+ }
+
+	/* For open by name, the shared_addr_srptr may be invalid */
+ if (cargs->args.open.shared_addr_srptr != SHAREDREGION_INVALIDSRPTR) {
+ params.shared_addr = sharedregion_get_ptr(
+ (u32 *)cargs->args.open.shared_addr_srptr);
+ }
+ params.gate = cargs->args.open.knl_gate;
+
+ cargs->api_status = heapbuf_open(&handle, &params);
+ if (cargs->api_status < 0)
+ goto free_name;
+
+ cargs->args.open.handle = handle;
+ size = copy_to_user(cargs->args.open.params, &params,
+ sizeof(struct heapbuf_params));
+ if (size) {
+ status = -EFAULT;
+ goto copy_to_usr_error;
+ }
+
+ goto free_name;
+
+copy_to_usr_error:
+ if (handle) {
+ heapbuf_close(handle);
+ cargs->args.open.handle = NULL;
+ }
+
+free_name:
+ if (cargs->args.open.name_len > 0)
+ kfree(params.name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== heapbuf_ioctl_close ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_close function
+ */
+static int heapbuf_ioctl_close(struct heapbuf_cmd_args *cargs)
+{
+ cargs->api_status = heapbuf_close(cargs->args.close.handle);
+ return 0;
+}
+
+/*
+ * ======== heapbuf_ioctl_shared_memreq ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_shared_memreq function
+ */
+static int heapbuf_ioctl_shared_memreq(struct heapbuf_cmd_args *cargs)
+{
+ struct heapbuf_params params;
+ s32 status = 0;
+ ulong size;
+ u32 bytes;
+
+ size = copy_from_user(&params, cargs->args.shared_memreq.params,
+ sizeof(struct heapbuf_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ bytes = heapbuf_shared_memreq(&params,
+ &cargs->args.shared_memreq.buf_size);
+ cargs->args.shared_memreq.bytes = bytes;
+ cargs->api_status = 0;
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== heapbuf_ioctl_get_config ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_get_config function
+ */
+static int heapbuf_ioctl_get_config(struct heapbuf_cmd_args *cargs)
+{
+ struct heapbuf_config config;
+ s32 status = 0;
+ ulong size;
+
+ cargs->api_status = heapbuf_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct heapbuf_config));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== heapbuf_ioctl_setup ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_setup function
+ */
+static int heapbuf_ioctl_setup(struct heapbuf_cmd_args *cargs)
+{
+ struct heapbuf_config config;
+ s32 status = 0;
+ ulong size;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct heapbuf_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = heapbuf_setup(&config);
+
+exit:
+ return status;
+}
+/*
+ * ======== heapbuf_ioctl_destroy ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_destroy function
+ */
+static int heapbuf_ioctl_destroy(struct heapbuf_cmd_args *cargs)
+{
+ cargs->api_status = heapbuf_destroy();
+ return 0;
+}
+
+
+/*
+ * ======== heapbuf_ioctl_get_stats ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_get_stats function
+ */
+static int heapbuf_ioctl_get_stats(struct heapbuf_cmd_args *cargs)
+{
+ struct memory_stats stats;
+ s32 status = 0;
+ ulong size;
+
+ cargs->api_status = heapbuf_get_stats(cargs->args.get_stats.handle,
+ &stats);
+	if (cargs->api_status)
+ goto exit;
+
+ size = copy_to_user(cargs->args.get_stats.stats, &stats,
+ sizeof(struct memory_stats));
+ if (size)
+ status = -EFAULT;
+
+exit:
+ return status;
+}
+
+/*
+ * ======== heapbuf_ioctl_get_extended_stats ========
+ * Purpose:
+ * This is the ioctl interface to the heapbuf_get_extended_stats function
+ */
+static int heapbuf_ioctl_get_extended_stats(struct heapbuf_cmd_args *cargs)
+{
+ struct heapbuf_extended_stats stats;
+ s32 status = 0;
+ ulong size;
+
+ cargs->api_status = heapbuf_get_extended_stats(
+ cargs->args.get_extended_stats.handle, &stats);
+ if (cargs->api_status != 0)
+ goto exit;
+
+ size = copy_to_user(cargs->args.get_extended_stats.stats, &stats,
+ sizeof(struct heapbuf_extended_stats));
+ if (size)
+ status = -EFAULT;
+
+exit:
+ return status;
+}
+
+/*
+ * ======== heapbuf_ioctl ========
+ * Purpose:
+ * This is the ioctl interface for the heapbuf module
+ */
+int heapbuf_ioctl(struct inode *pinode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct heapbuf_cmd_args __user *uarg =
+ (struct heapbuf_cmd_args __user *)args;
+ struct heapbuf_cmd_args cargs;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct heapbuf_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_HEAPBUF_ALLOC:
+ status = heapbuf_ioctl_alloc(&cargs);
+ break;
+
+ case CMD_HEAPBUF_FREE:
+ status = heapbuf_ioctl_free(&cargs);
+ break;
+
+ case CMD_HEAPBUF_PARAMS_INIT:
+ status = heapbuf_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_HEAPBUF_CREATE:
+ status = heapbuf_ioctl_create(&cargs);
+ break;
+
+ case CMD_HEAPBUF_DELETE:
+ status = heapbuf_ioctl_delete(&cargs);
+ break;
+
+ case CMD_HEAPBUF_OPEN:
+ status = heapbuf_ioctl_open(&cargs);
+ break;
+
+ case CMD_HEAPBUF_CLOSE:
+ status = heapbuf_ioctl_close(&cargs);
+ break;
+
+ case CMD_HEAPBUF_SHAREDMEMREQ:
+ status = heapbuf_ioctl_shared_memreq(&cargs);
+ break;
+
+ case CMD_HEAPBUF_GETCONFIG:
+ status = heapbuf_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_HEAPBUF_SETUP:
+ status = heapbuf_ioctl_setup(&cargs);
+ break;
+
+ case CMD_HEAPBUF_DESTROY:
+ status = heapbuf_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_HEAPBUF_GETSTATS:
+ status = heapbuf_ioctl_get_stats(&cargs);
+ break;
+
+ case CMD_HEAPBUF_GETEXTENDEDSTATS:
+ status = heapbuf_ioctl_get_extended_stats(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ status = -ERESTARTSYS;
+
+ if (status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs,
+ sizeof(struct heapbuf_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+
+}
+
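+/*
+ * User-space call sketch (illustrative; the device path and the
+ * user_config variable are assumptions, not part of this patch).  Every
+ * command travels through a struct heapbuf_cmd_args that is copied in
+ * and copied back out around the handler above:
+ *
+ *	struct heapbuf_cmd_args cargs;
+ *	struct heapbuf_config user_config;
+ *	int fd = open("/dev/syslink_ipc", O_RDWR);
+ *
+ *	memset(&cargs, 0, sizeof(cargs));
+ *	cargs.args.get_config.config = &user_config;
+ *	if (ioctl(fd, CMD_HEAPBUF_GETCONFIG, &cargs) == 0 &&
+ *			cargs.api_status == 0)
+ *		... user_config now holds the module configuration ...
+ */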
diff --git a/drivers/dsp/syslink/multicore_ipc/ipc_drv.c b/drivers/dsp/syslink/multicore_ipc/ipc_drv.c
new file mode 100755
index 000000000000..2e9a36f53592
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/ipc_drv.c
@@ -0,0 +1,242 @@
+/*
+ * ipc_drv.c
+ *
+ * IPC driver module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/*#ifdef MODULE*/
+#include <linux/module.h>
+/*#endif*/
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/fs.h>
+#include <linux/moduleparam.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include <ipc_ioctl.h>
+#include <nameserver.h>
+
+#define IPC_NAME "syslink_ipc"
+#define IPC_MAJOR 0
+#define IPC_MINOR 0
+#define IPC_DEVICES 1
+
+struct ipc_device {
+ struct cdev cdev;
+};
+
+struct ipc_device *ipc_device;
+static struct class *ipc_class;
+
+s32 ipc_major = IPC_MAJOR;
+s32 ipc_minor = IPC_MINOR;
+char *ipc_name = IPC_NAME;
+
+module_param(ipc_name, charp, 0);
+MODULE_PARM_DESC(ipc_name, "Device name, default = syslink_ipc");
+
+module_param(ipc_major, int, 0); /* Driver's major number */
+MODULE_PARM_DESC(ipc_major, "Major device number, default = 0 (auto)");
+
+module_param(ipc_minor, int, 0); /* Driver's minor number */
+MODULE_PARM_DESC(ipc_minor, "Minor device number, default = 0 (auto)");
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL");
+
+/*
+ * ======== ipc_open ========
+ * This function is invoked when an application
+ * opens a handle to the ipc driver
+ */
+int ipc_open(struct inode *inode, struct file *filp)
+{
+ s32 retval = 0;
+ struct ipc_device *dev;
+
+ dev = container_of(inode->i_cdev, struct ipc_device, cdev);
+ filp->private_data = dev;
+ return retval;
+}
+
+/*
+ * ======== ipc_release ========
+ * This function is invoked when an application
+ * closes its handle to the ipc driver
+ */
+int ipc_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/*
+ * ======== ipc_ioctl ========
+ * This function provides IO interface to the
+ * ipc driver
+ */
+int ipc_ioctl(struct inode *ip, struct file *filp, u32 cmd, ulong arg)
+{
+ s32 retval = 0;
+ void __user *argp = (void __user *)arg;
+
+	/* Verify the memory and ensure that it is not in kernel
+	 * address space
+	 */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ retval = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ retval = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
+
+ if (retval) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ retval = ipc_ioc_router(cmd, (ulong)argp);
+ return retval;
+
+exit:
+ return retval;
+}
+
+const struct file_operations ipc_fops = {
+	.open = ipc_open,
+	.release = ipc_release,
+	.ioctl = ipc_ioctl,
+};
+
+/*
+ * ======== ipc_modules_init ========
+ * IPC initialization routine. This will initialize the various
+ * sub-components (modules) of IPC.
+ */
+static int ipc_modules_init(void)
+{
+ return 0;
+}
+
+/*
+ * ======== ipc_modules_exit ========
+ * IPC cleanup routine. This will clean up the various
+ * sub-components (modules) of IPC.
+ */
+static void ipc_modules_exit(void)
+{
+
+}
+
+/*
+ * ======== ipc_init ========
+ * Initialization routine. Executed when the driver is
+ * loaded (as a kernel module), or when the system
+ * is booted (when included as part of the kernel
+ * image).
+ */
+static int __init ipc_init(void)
+{
+	dev_t dev;
+ s32 retval = 0;
+
+ /* 2.6 device model */
+ if (ipc_major) {
+ dev = MKDEV(ipc_major, ipc_minor);
+ retval = register_chrdev_region(dev, IPC_DEVICES, ipc_name);
+ } else {
+ retval = alloc_chrdev_region(&dev, ipc_minor, IPC_DEVICES,
+ ipc_name);
+ ipc_major = MAJOR(dev);
+ }
+
+ if (retval < 0) {
+ printk(KERN_ERR "ipc_init: can't get major %x \n", ipc_major);
+ goto exit;
+ }
+
+ ipc_device = kmalloc(sizeof(struct ipc_device), GFP_KERNEL);
+ if (!ipc_device) {
+ printk(KERN_ERR "ipc_init: memory allocation failed for "
+ "ipc_device \n");
+ retval = -ENOMEM;
+ goto unreg_exit;
+ }
+
+ memset(ipc_device, 0, sizeof(struct ipc_device));
+ retval = ipc_modules_init();
+ if (retval) {
+ printk(KERN_ERR "ipc_init: ipc initialization failed \n");
+ goto unreg_exit;
+
+ }
+ /* TO DO : NEED TO LOOK IN TO THIS */
+ ipc_class = class_create(THIS_MODULE, "syslink_ipc");
+ if (IS_ERR(ipc_class)) {
+ printk(KERN_ERR "ipc_init: error creating ipc class \n");
+ retval = PTR_ERR(ipc_class);
+ goto unreg_exit;
+ }
+
+ device_create(ipc_class, NULL, MKDEV(ipc_major, ipc_minor), NULL,
+ ipc_name);
+ cdev_init(&ipc_device->cdev, &ipc_fops);
+ ipc_device->cdev.owner = THIS_MODULE;
+ retval = cdev_add(&ipc_device->cdev, dev, IPC_DEVICES);
+ if (retval) {
+ printk(KERN_ERR "ipc_init: failed to add the ipc device \n");
+ goto class_exit;
+ }
+ return retval;
+
+class_exit:
+ class_destroy(ipc_class);
+
+unreg_exit:
+ unregister_chrdev_region(dev, IPC_DEVICES);
+ kfree(ipc_device);
+
+exit:
+ return retval;
+}
+
+/*
+ * ======== ipc_exit ========
+ * This function is invoked during unlinking of the ipc
+ * module from the kernel. IPC resources are
+ * freed in this function.
+ */
+static void __exit ipc_exit(void)
+{
+ dev_t devno;
+
+ ipc_modules_exit();
+ devno = MKDEV(ipc_major, ipc_minor);
+ if (ipc_device) {
+ cdev_del(&ipc_device->cdev);
+ kfree(ipc_device);
+ }
+ unregister_chrdev_region(devno, IPC_DEVICES);
+ if (ipc_class) {
+ /* remove the device from sysfs */
+ device_destroy(ipc_class, MKDEV(ipc_major, ipc_minor));
+ class_destroy(ipc_class);
+ }
+}
+
+/*
+ * ipc driver initialization and de-initialization functions
+ */
+module_init(ipc_init);
+module_exit(ipc_exit);
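+
+/*
+ * Load-time note (illustrative; the .ko file name depends on the build
+ * files and is an assumption here).  The module parameters above allow
+ * the defaults to be overridden, e.g.:
+ *
+ *	insmod syslink_ipc.ko ipc_name=my_ipc ipc_major=251
+ *
+ * With the defaults, the major number is allocated dynamically and the
+ * class/device are registered under the name "syslink_ipc".
+ */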
diff --git a/drivers/dsp/syslink/multicore_ipc/ipc_ioctl.c b/drivers/dsp/syslink/multicore_ipc/ipc_ioctl.c
new file mode 100755
index 000000000000..b40a1e192e76
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/ipc_ioctl.c
@@ -0,0 +1,80 @@
+/*
+ * ipc_ioctl.c
+ *
+ * This is the collection of ioctl functions that will invoke various ipc
+ * module level functions based on user commands
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/uaccess.h>
+
+#include <ipc_ioctl.h>
+#include <multiproc_ioctl.h>
+#include <nameserver_ioctl.h>
+#include <heapbuf_ioctl.h>
+#include <sharedregion_ioctl.h>
+#include <gatepeterson_ioctl.h>
+#include <listmp_sharedmemory_ioctl.h>
+#include <messageq_ioctl.h>
+#include <messageq_transportshm_ioctl.h>
+#include <nameserver_remotenotify_ioctl.h>
+#include <sysmgr_ioctl.h>
+#include <sysmemmgr_ioctl.h>
+
+/*
+ * ======== ipc_ioc_router ========
+ * Purpose:
+ * This will route the ioctl commands to the proper
+ * modules
+ */
+int ipc_ioc_router(u32 cmd, ulong arg)
+{
+ s32 retval = 0;
+ u32 ioc_nr = _IOC_NR(cmd);
+
+ if (ioc_nr >= MULTIPROC_BASE_CMD && ioc_nr <= MULTIPROC_END_CMD)
+ retval = multiproc_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= NAMESERVER_BASE_CMD &&
+ ioc_nr <= NAMESERVER_END_CMD)
+ retval = nameserver_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= HEAPBUF_BASE_CMD && ioc_nr <= HEAPBUF_END_CMD)
+ retval = heapbuf_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= SHAREDREGION_BASE_CMD &&
+ ioc_nr <= SHAREDREGION_END_CMD)
+ retval = sharedregion_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= GATEPETERSON_BASE_CMD &&
+ ioc_nr <= GATEPETERSON_END_CMD)
+ retval = gatepeterson_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= LISTMP_SHAREDMEMORY_BASE_CMD &&
+ ioc_nr <= LISTMP_SHAREDMEMORY_END_CMD)
+ retval = listmp_sharedmemory_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= MESSAGEQ_BASE_CMD &&
+ ioc_nr <= MESSAGEQ_END_CMD)
+ retval = messageq_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= MESSAGEQ_TRANSPORTSHM_BASE_CMD &&
+ ioc_nr <= MESSAGEQ_TRANSPORTSHM_END_CMD)
+ retval = messageq_transportshm_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= NAMESERVERREMOTENOTIFY_BASE_CMD &&
+ ioc_nr <= NAMESERVERREMOTENOTIFY_END_CMD)
+ retval = nameserver_remotenotify_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= SYSMGR_BASE_CMD &&
+ ioc_nr <= SYSMGR_END_CMD)
+ retval = sysmgr_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= SYSMEMMGR_BASE_CMD &&
+ ioc_nr <= SYSMEMMGR_END_CMD)
+ retval = sysmemmgr_ioctl(NULL, NULL, cmd, arg);
+ else
+ retval = -ENOTTY;
+
+ return retval;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/listmp.c b/drivers/dsp/syslink/multicore_ipc/listmp.c
new file mode 100644
index 000000000000..742c67f58f4c
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/listmp.c
@@ -0,0 +1,440 @@
+/*
+ * listmp.c
+ *
+ * The listmp module is a linked-list based module designed to be
+ * used in a multi-processor environment. It is designed to
+ * provide a means of communication between different processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/*!
+ *
+ * This module implements the ListMP.
+ * This module is instance based. Each instance requires a small
+ * piece of shared memory. This is specified via the #shared_addr
+ * parameter to the create. The proper #shared_addr_size parameter
+ * can be determined via the #shared_memreq call. Note: the
+ * parameters to this function must be the same that will be used to
+ * create or open the instance.
+ * The ListMP module uses a #ti.sdo.utils.NameServer instance
+ * to store instance information when an instance is created and
+ * the name parameter is non-NULL. If a name is supplied, it must
+ * be unique for all ListMP instances.
+ * The #create also initializes the shared memory as needed. The
+ * shared memory must be initialized to 0 or all ones
+ * (e.g. 0xFFFFFFFF) before the ListMP instance is created.
+ * Once an instance is created, an open can be performed. The #open
+ * is used to gain access to the same ListMP instance.
+ * Generally an instance is created on one processor and opened
+ * on the other processor.
+ * The open returns a ListMP instance handle like the create,
+ * however the open does not modify the shared memory. Generally an
+ * instance is created on one processor and opened on the other
+ * processor.
+ * There are two options when opening the instance:
+ * @li Supply the same name as specified in the create. The
+ * ListMP module queries the NameServer to get the needed
+ * information.
+ * @li Supply the same #shared_addr value as specified in the
+ * create. If the open is called before the instance is created,
+ * open returns NULL.
+ * There is currently a list of restrictions for the module:
+ * @li Both processors must have the same endianness. Endianness
+ * conversion may be supported in a future version of ListMP.
+ * @li The module will be made a gated module
+ */
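+
+/*
+ * Create/open sketch (illustrative; shm_base stands for a caller-provided
+ * shared-region address and is not defined here).  The creator and the
+ * opener must pass the same parameters, as noted above:
+ *
+ *	struct listmp_params params;
+ *	void *list = NULL;
+ *
+ *	listmp_params_init(NULL, &params);
+ *	params.list_type = listmp_type_SHARED;
+ *	params.shared_addr = shm_base;
+ *	params.shared_addr_size = listmp_shared_memreq(&params);
+ *	list = listmp_create(&params);          creator side
+ *	listmp_open(&list, &params);            opener side
+ */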
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+
+/* Utilities headers */
+#include <linux/string.h>
+#include <linux/list.h>
+
+/* Module level headers */
+#include <nameserver.h>
+#include <listmp.h>
+#include <listmp_sharedmemory.h>
+
+
+/*
+ * ======== listmp_params_init ========
+ * Purpose:
+ * Function initializes listmp parameters
+ */
+void listmp_params_init(void *listmp_handle, struct listmp_params *params)
+{
+ BUG_ON(params == NULL);
+ listmp_sharedmemory_params_init(listmp_handle, params);
+}
+
+/*
+ * ======== listmp_shared_memreq ========
+ * Purpose:
+ * Function to get shared memory requirement for the module
+ */
+int listmp_shared_memreq(struct listmp_params *params)
+{
+ int shared_memreq = 0;
+
+ if (WARN_ON(params == NULL))
+ goto exit;
+
+ shared_memreq = listmp_sharedmemory_shared_memreq(params);
+exit:
+ return shared_memreq;
+}
+
+/*
+ * ======== listmp_create ========
+ * Purpose:
+ * Creates a new instance of listmp_sharedmemory module.
+ */
+void *listmp_create(struct listmp_params *params)
+{
+ struct listmp_object *handle = NULL;
+
+ if (WARN_ON(params == NULL))
+ goto exit;
+
+ if (params->list_type == listmp_type_SHARED)
+ handle = (struct listmp_object *)
+ listmp_sharedmemory_create(params);
+ return (void *)handle;
+
+exit:
+ printk(KERN_ERR "listmp_create: listmp_params passed are NULL!\n");
+ return NULL;
+}
+
+/*
+ * ======== listmp_delete ========
+ * Purpose:
+ * Deletes an instance of listmp_sharedmemory module.
+ */
+int listmp_delete(void **listmp_handle_ptr)
+{
+ int status = 0;
+
+ if (WARN_ON(*listmp_handle_ptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (((struct listmp_object *)(*listmp_handle_ptr))->list_type == \
+ listmp_type_SHARED)
+ status = listmp_sharedmemory_delete(listmp_handle_ptr);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_delete failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+
+/*
+ * ======== listmp_open ========
+ * Purpose:
+ * Opens an instance of a previously created listmp_sharedmemory module.
+ */
+int listmp_open(void **listmp_handle_ptr, struct listmp_params *params)
+{
+ int status = 0;
+
+ if (WARN_ON(listmp_handle_ptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(params == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (params->list_type == listmp_type_SHARED)
+ status = listmp_sharedmemory_open(listmp_handle_ptr, params);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "listmp_open failed! status = 0x%x\n", status);
+ return status;
+}
+
+/*
+ * ======== listmp_close ========
+ * Purpose:
+ * Closes an instance of a previously opened listmp_sharedmemory module.
+ */
+int listmp_close(void *listmp_handle)
+{
+ int status = 0;
+
+ if (WARN_ON(listmp_handle == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (((struct listmp_object *)listmp_handle)->list_type == \
+ listmp_type_SHARED)
+ status = listmp_sharedmemory_close(listmp_handle);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "listmp_close failed! status = 0x%x\n", status);
+ return status;
+}
+
+/*
+ * ======== listmp_empty ========
+ * Purpose:
+ * Function to check if list is empty
+ */
+bool listmp_empty(void *listmp_handle)
+{
+ bool isEmpty = false;
+ struct listmp_object *handle = NULL;
+
+ if (WARN_ON(listmp_handle == NULL))
+ goto exit;
+
+ handle = (struct listmp_object *)listmp_handle;
+
+ BUG_ON(handle->empty == NULL);
+ isEmpty = handle->empty(listmp_handle);
+
+exit:
+ return isEmpty;
+}
+
+/*
+ * ======== listmp_get_head ========
+ * Purpose:
+ * Function to get head element from a listmp_sharedmemory list
+ */
+void *listmp_get_head(void *listmp_handle)
+{
+ struct listmp_object *handle = NULL;
+ struct listmp_elem *elem = NULL;
+
+ if (WARN_ON(listmp_handle == NULL))
+ goto exit;
+
+ handle = (struct listmp_object *)listmp_handle;
+
+ BUG_ON(handle->get_head == NULL);
+ elem = handle->get_head(listmp_handle);
+
+exit:
+ return elem;
+}
+
+/*
+ * ======== listmp_get_tail ========
+ * Purpose:
+ * Function to get a tail element from a listmp_sharedmemory list
+ */
+void *listmp_get_tail(void *listmp_handle)
+{
+ struct listmp_object *handle = NULL;
+ struct listmp_elem *elem = NULL;
+
+ if (WARN_ON(listmp_handle == NULL))
+ goto exit;
+
+ handle = (struct listmp_object *)listmp_handle;
+
+ BUG_ON(handle->get_tail == NULL);
+ elem = handle->get_tail(listmp_handle);
+
+exit:
+ return elem;
+}
+
+/*
+ * ======== listmp_put_head ========
+ * Purpose:
+ * Function to put head element into a listmp_sharedmemory list
+ */
+int listmp_put_head(void *listmp_handle, struct listmp_elem *elem)
+{
+ int status = 0;
+ struct listmp_object *handle = NULL;
+
+ if (WARN_ON(listmp_handle == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(elem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct listmp_object *) listmp_handle;
+
+ BUG_ON(handle->put_head == NULL);
+ status = handle->put_head(listmp_handle, elem);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_put_head failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+
+/*
+ * ======== listmp_put_tail ========
+ * Purpose:
+ * Function to put tail element into a listmp_sharedmemory list
+ */
+int listmp_put_tail(void *listmp_handle, struct listmp_elem *elem)
+{
+ int status = 0;
+ struct listmp_object *handle = NULL;
+
+ if (WARN_ON(listmp_handle == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(elem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct listmp_object *)listmp_handle;
+
+ BUG_ON(handle->put_tail == NULL);
+ status = handle->put_tail(listmp_handle, elem);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_put_tail failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
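+
+/*
+ * FIFO sketch (illustrative; struct my_msg is a hypothetical element
+ * type).  Elements embed a struct listmp_elem header as their first
+ * member; producers append with listmp_put_tail() and consumers drain
+ * with listmp_get_head():
+ *
+ *	struct my_msg {
+ *		struct listmp_elem elem;
+ *		u32 payload;
+ *	};
+ *
+ *	struct my_msg *msg = ...;               producer side
+ *	listmp_put_tail(list, &msg->elem);
+ *
+ *	msg = (struct my_msg *)listmp_get_head(list);   consumer side
+ */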
+
+/*
+ * ======== listmp_insert ========
+ * Purpose:
+ * Function to insert element into a listmp_sharedmemory list
+ */
+int listmp_insert(void *listmp_handle, struct listmp_elem *elem,
+ struct listmp_elem *curElem)
+{
+ int status = 0;
+ struct listmp_object *handle = NULL;
+
+ if (WARN_ON(listmp_handle == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(elem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(curElem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct listmp_object *)listmp_handle;
+
+ BUG_ON(handle->insert == NULL);
+ status = handle->insert(listmp_handle, elem, curElem);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_insert failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+
+/*
+ * ======== listmp_remove ========
+ * Purpose:
+ * Function to remove an element from a listmp_sharedmemory list
+ */
+int listmp_remove(void *listmp_handle, struct listmp_elem *elem)
+{
+ int status = LISTMP_SUCCESS;
+ struct listmp_object *handle = NULL;
+
+ if (WARN_ON(listmp_handle == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(elem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct listmp_object *)listmp_handle;
+
+ BUG_ON(handle->remove == NULL);
+ status = handle->remove(listmp_handle, elem);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_remove failed! status = 0x%x\n",
+ status);
+ }
+	return status;
+}
+
+/*
+ * ======== listmp_next ========
+ * Purpose:
+ * Function to return the next element from a listmp_sharedmemory list
+ */
+void *listmp_next(void *listmp_handle, struct listmp_elem *elem)
+{
+ struct listmp_object *handle = NULL;
+ struct listmp_elem *nextElem = NULL;
+
+ if (WARN_ON(listmp_handle == NULL))
+ goto exit;
+
+ handle = (struct listmp_object *)listmp_handle;
+
+ BUG_ON(handle->next == NULL);
+ nextElem = handle->next(listmp_handle, elem);
+
+exit:
+ return nextElem;
+}
+
+/*
+ * ======== listmp_prev ========
+ * Purpose:
+ * Function to return the previous element from a listmp_sharedmemory list
+ */
+void *listmp_prev(void *listmp_handle, struct listmp_elem *elem)
+{
+ struct listmp_object *handle = NULL;
+ struct listmp_elem *prevElem = NULL;
+
+ if (WARN_ON(listmp_handle == NULL))
+ goto exit;
+
+ handle = (struct listmp_object *)listmp_handle;
+
+ BUG_ON(handle->prev == NULL);
+ prevElem = handle->prev(listmp_handle, elem);
+
+exit:
+ return prevElem;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/listmp_sharedmemory.c b/drivers/dsp/syslink/multicore_ipc/listmp_sharedmemory.c
new file mode 100755
index 000000000000..1e49d832c91d
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/listmp_sharedmemory.c
@@ -0,0 +1,1487 @@
+/*
+ * listmp_sharedmemory.c
+ *
+ * The listmp_sharedmemory module is a linked-list based module designed
+ * for use in a multi-processor environment. It provides a means of
+ * communication between different processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/*!
+ * This module is instance based. Each instance requires a small
+ * piece of shared memory. This is specified via the #shared_addr
+ * parameter to the create. The proper #shared_addr_size parameter
+ * can be determined via the #shared_memreq call. Note: the
+ * parameters to this call must be the same as those that will be used to
+ * create or open the instance.
+ * The listmp_sharedmemory module uses a #NameServer instance
+ * to store instance information when an instance is created and
+ * the name parameter is non-NULL. If a name is supplied, it must
+ * be unique for all listmp_sharedmemory instances.
+ * The #create also initializes the shared memory as needed. The
+ * shared memory must be initialized to 0 or all ones
+ * (e.g. 0xFFFFFFFF) before the listmp_sharedmemory instance
+ * is created.
+ * Once an instance is created, an open can be performed. The #open
+ * is used to gain access to the same listmp_sharedmemory instance.
+ * Generally an instance is created on one processor and opened
+ * on the other processor.
+ * The open returns a listmp_sharedmemory instance handle like the
+ * create; however, the open does not modify the shared memory.
+ * There are two options when opening the instance:
+ * @li Supply the same name as specified in the create. The
+ * listmp_sharedmemory module queries the NameServer to get the
+ * needed information.
+ * @li Supply the same #shared_addr value as specified in the
+ * create.
+ * If the open is called before the instance is created, open
+ * returns NULL.
+ * There is currently a list of restrictions for the module:
+ * @li Both processors must have the same endianness. Endianness
+ * conversion may be supported in a future version of
+ * listmp_sharedmemory.
+ * @li The module will be made a gated module
+ */
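+
+/*
+ * Creation/open sketch (illustrative only; 'shared_base' and "MyList" are
+ * placeholders, 'shared_base' must lie within a configured SharedRegion,
+ * and all error handling is omitted):
+ *
+ *	listmp_sharedmemory_params params;
+ *	listmp_sharedmemory_handle handle;
+ *
+ *	creator:
+ *		listmp_sharedmemory_params_init(NULL, &params);
+ *		params.name = "MyList";
+ *		params.shared_addr = shared_base;
+ *		params.shared_addr_size =
+ *			listmp_sharedmemory_shared_memreq(&params);
+ *		handle = listmp_sharedmemory_create(&params);
+ *
+ *	opener (remote processor, by name):
+ *		listmp_sharedmemory_params_init(NULL, &params);
+ *		params.name = "MyList";
+ *		listmp_sharedmemory_open(&handle, &params);
+ */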
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+
+/* Utilities headers */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/* Syslink headers */
+#include <syslink/atomic_linux.h>
+
+/* Module level headers */
+#include <nameserver.h>
+#include <sharedregion.h>
+#include <multiproc.h>
+#include "_listmp.h"
+#include <listmp.h>
+#include <gatepeterson.h>
+#include <listmp_sharedmemory.h>
+
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+/*! @brief Macro to make a correct module magic number with refCount */
+#define LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(x) \
+ ((LISTMPSHAREDMEMORY_MODULEID << 12u) | (x))
+
+/*!
+ * @brief Name of the reserved NameServer used for listmp_sharedmemory.
+ */
+#define LISTMP_SHAREDMEMORY_NAMESERVER "ListMPSharedMemory"
+
+/*!
+ * @brief Cache size
+ */
+#define LISTMP_SHAREDMEMORY_CACHESIZE 128
+
+
+
+/* =============================================================================
+ * Structures and Enums
+ * =============================================================================
+ */
+/*! @brief structure for listmp_sharedmemory module state */
+struct listmp_sharedmemory_module_object {
+ atomic_t ref_count;
+ /*!< Reference count */
+ void *ns_handle;
+ /*!< Handle to the local NameServer used for storing GP objects */
+ struct list_head obj_list;
+ /*!< List holding created listmp_sharedmemory objects */
+ struct mutex *local_gate;
+ /*!< Handle to lock for protecting obj_list */
+ struct listmp_config cfg;
+ /*!< Current config values */
+ struct listmp_config default_cfg;
+ /*!< Default config values */
+ listmp_sharedmemory_params default_inst_params;
+ /*!< Default instance creation parameters */
+};
+
+/*!
+ * @var listmp_sharedmemory_state
+ *
+ * @brief State object for the listmp_sharedmemory module.
+ */
+static
+struct listmp_sharedmemory_module_object listmp_sharedmemory_state = {
+ .default_cfg.max_name_len = 32,
+ .default_cfg.use_name_server = true,
+ .default_inst_params.shared_addr = 0,
+ .default_inst_params.shared_addr_size = 0,
+ .default_inst_params.name = NULL,
+ .default_inst_params.gate = NULL,
+ .default_inst_params.list_type = listmp_type_SHARED};
+
+/*!
+ * @brief Structure for the internal Handle for the listmp_sharedmemory.
+ */
+struct listmp_sharedmemory_obj {
+ struct list_head list_elem;
+ /*!< Used for creating a linked list */
+ VOLATILE struct listmp_elem *listmp_elem;
+ /*!< Used for storing listmp_sharedmemory element */
+ struct listmp_proc_attrs *owner;
+ /*!< Creator's attributes associated with an instance */
+ listmp_sharedmemory_params params;
+ /*!< the parameter structure */
+ void *ns_key;
+ u32 index;
+ /*!< the index for SrPtr */
+ VOLATILE struct listmp_attrs *attrs;
+ /*!< Shared memory attributes */
+ void *top;
+ /*!< Pointer to the top Object */
+};
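+
+/*
+ * Per-instance shared memory layout (as laid out by
+ * _listmp_sharedmemory_create() below, with
+ * LISTMP_SHAREDMEMORY_CACHESIZE = 128 bytes of cache-line padding):
+ *
+ *	params->shared_addr + 0x00: struct listmp_attrs
+ *	                            (status, version, shared_addr_size)
+ *	params->shared_addr + 0x80: struct listmp_elem
+ *	                            (shared head element of the list)
+ */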
+
+
+/* =============================================================================
+ * Function definitions
+ * =============================================================================
+ */
+/*
+ * ======== _listmp_sharedmemory_create ========
+ * Purpose:
+ * Creates a new instance of the listmp_sharedmemory module. This is an
+ * internal function because both listmp_sharedmemory_create and
+ * listmp_sharedmemory_open use the same functionality.
+ */
+static listmp_sharedmemory_handle
+_listmp_sharedmemory_create(listmp_sharedmemory_params *params,
+ u32 create_flag);
+
+
+/* =============================================================================
+ * Function API's
+ * =============================================================================
+ */
+/*
+ * ======== listmp_sharedmemory_get_config ========
+ * Purpose:
+ * Function to get the configuration parameters used to set up
+ * the listmp_sharedmemory module.
+ */
+int listmp_sharedmemory_get_config(struct listmp_config *cfgParams)
+{
+ int status = 0;
+
+ if (WARN_ON(cfgParams == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true)
+ /* If setup has not yet been called */
+ memcpy(cfgParams, &listmp_sharedmemory_state.default_cfg,
+ sizeof(struct listmp_config));
+ else
+ memcpy(cfgParams, &listmp_sharedmemory_state.cfg,
+ sizeof(struct listmp_config));
+ return 0;
+
+exit:
+ printk(KERN_ERR "listmp_sharedmemory_get_config failed: "
+ "status = 0x%x\n", status);
+ return status;
+}
+
+/*
+ * ======== listmp_sharedmemory_setup ========
+ * Purpose:
+ * Function to setup the listmp_sharedmemory module.
+ */
+int listmp_sharedmemory_setup(struct listmp_config *config)
+{
+ int status = 0;
+ int status1 = 0;
+ void *nshandle = NULL;
+ struct nameserver_params params;
+ struct listmp_config tmp_cfg;
+
+ /* This sets the ref_count variable if it is not initialized. The upper
+ * 16 bits are written with the module ID to ensure correctness of the
+ * ref_count variable.
+ */
+ atomic_cmpmask_and_set(&listmp_sharedmemory_state.ref_count,
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&listmp_sharedmemory_state.ref_count)
+ != LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (config == NULL) {
+ listmp_sharedmemory_get_config(&tmp_cfg);
+ config = &tmp_cfg;
+ }
+
+ if (WARN_ON(config->max_name_len == 0)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (likely((config->use_name_server == true))) {
+ /* Initialize the parameters */
+ nameserver_params_init(&params);
+ params.max_value_len = 4;
+ params.max_name_len = config->max_name_len;
+ /* Create the nameserver for modules */
+ nshandle = nameserver_create(
+ LISTMP_SHAREDMEMORY_NAMESERVER, &params);
+ if (unlikely(nshandle == NULL)) {
+ status = LISTMPSHAREDMEMORY_E_FAIL;
+ goto exit;
+ }
+ }
+
+ listmp_sharedmemory_state.ns_handle = nshandle;
+ /* Construct the list object */
+ INIT_LIST_HEAD(&listmp_sharedmemory_state.obj_list);
+ /* Create a lock for protecting list object */
+ listmp_sharedmemory_state.local_gate = \
+ kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (listmp_sharedmemory_state.local_gate == NULL) {
+ status = -ENOMEM;
+ goto clean_nameserver;
+ }
+
+ mutex_init(listmp_sharedmemory_state.local_gate);
+ /* Copy the cfg */
+ memcpy(&listmp_sharedmemory_state.cfg, config,
+ sizeof(struct listmp_config));
+ return 0;
+
+clean_nameserver:
+ printk(KERN_ERR "listmp_sharedmemory_setup: Failed to create the local "
+ "gate! status = 0x%x\n", status);
+ atomic_set(&listmp_sharedmemory_state.ref_count,
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0));
+ status1 = nameserver_delete
+ (&(listmp_sharedmemory_state.ns_handle));
+ BUG_ON(status1 < 0);
+
+exit:
+ printk(KERN_ERR "listmp_sharedmemory_setup failed! status = 0x%x\n",
+ status);
+ return status;
+}
+
+/*
+ * ======== listmp_sharedmemory_destroy ========
+ * Purpose:
+ * Function to destroy the listmp_sharedmemory module.
+ */
+int listmp_sharedmemory_destroy(void)
+{
+ int status = 0;
+ int status1 = 0;
+ struct list_head *elem = NULL;
+ struct list_head *head = &listmp_sharedmemory_state.obj_list;
+ struct list_head *next;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (atomic_dec_return(&listmp_sharedmemory_state.ref_count)
+ == LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0)) {
+ /* Temporarily increment refCount here. */
+ atomic_set(&listmp_sharedmemory_state.ref_count,
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1));
+ /* Check if any listmp_sharedmemory instances have not been
+ * deleted so far. If so, delete them. */
+ for (elem = (head)->next; elem != (head); elem = next) {
+ /* Retain the next pointer so it doesn't
+ get overwritten */
+ next = elem->next;
+ if (((struct listmp_sharedmemory_obj *) elem)->owner
+ ->proc_id == multiproc_get_id(NULL)) {
+ status1 = listmp_sharedmemory_delete(
+ (listmp_sharedmemory_handle *)
+ &(((struct listmp_sharedmemory_obj *) \
+ elem)->top));
+ WARN_ON(status1 < 0);
+ } else {
+ status1 = listmp_sharedmemory_close(
+ (listmp_sharedmemory_handle)
+ (((struct listmp_sharedmemory_obj *) \
+ elem)->top));
+ WARN_ON(status1 < 0);
+ }
+ }
+
+ /* Again reset refCount. */
+ atomic_set(&listmp_sharedmemory_state.ref_count,
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0));
+ if (likely(listmp_sharedmemory_state.cfg.use_name_server
+ == true)) {
+ /* Delete the nameserver for modules */
+ status = nameserver_delete(
+ &(listmp_sharedmemory_state.ns_handle));
+ BUG_ON(status < 0);
+ }
+
+ /* Destruct the list object */
+ list_del(&listmp_sharedmemory_state.obj_list);
+ /* Delete the list lock */
+ kfree(listmp_sharedmemory_state.local_gate);
+ listmp_sharedmemory_state.local_gate = NULL;
+ memset(&listmp_sharedmemory_state.cfg, 0,
+ sizeof(struct listmp_config));
+ /* Decrease the refCount */
+ atomic_set(&listmp_sharedmemory_state.ref_count,
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0));
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_sharedmemory_destroy failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
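+
+/*
+ * Module setup sketch (illustrative only; error checking omitted and the
+ * adjusted field value is just an example):
+ *
+ *	struct listmp_config cfg;
+ *
+ *	listmp_sharedmemory_get_config(&cfg);
+ *	cfg.max_name_len = 32;	(adjust defaults if required)
+ *	listmp_sharedmemory_setup(&cfg);
+ *	...
+ *	listmp_sharedmemory_destroy();
+ */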
+
+/*
+ * ======== listmp_sharedmemory_params_init ========
+ * Purpose:
+ * Function to initialize the config-params structure with supplier-specified
+ * defaults before instance creation.
+ */
+void listmp_sharedmemory_params_init(listmp_sharedmemory_handle handle,
+ listmp_sharedmemory_params *params)
+{
+ s32 status = 0;
+ struct listmp_sharedmemory_obj *obj = NULL;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (handle == NULL) {
+ memcpy(params,
+ &(listmp_sharedmemory_state.default_inst_params),
+ sizeof(listmp_sharedmemory_params));
+ } else {
+ obj = (struct listmp_sharedmemory_obj *)
+ (((listmp_sharedmemory_object *) handle)->obj);
+
+ /* Return the parameters of the existing instance */
+ memcpy((void *)params,
+ (void *)&obj->params,
+ sizeof(listmp_sharedmemory_params));
+ }
+ return;
+
+exit:
+ printk(KERN_ERR "listmp_sharedmemory_params_init failed! "
+ "status = 0x%x\n", status);
+ return;
+}
+
+/*
+ * ======== listmp_sharedmemory_create ========
+ * Purpose:
+ * Creates a new instance of listmp_sharedmemory module.
+ */
+listmp_sharedmemory_handle listmp_sharedmemory_create(
+ listmp_sharedmemory_params *params)
+{
+ s32 status = 0;
+ listmp_sharedmemory_object *handle = NULL;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (WARN_ON((params->shared_addr == (u32)NULL)
+ && (params->list_type != listmp_type_FAST))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(params->shared_addr_size
+ < listmp_sharedmemory_shared_memreq(params))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (listmp_sharedmemory_object *)
+ _listmp_sharedmemory_create(params, true);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_sharedmemory_create failed! "
+ "status = 0x%x\n", status);
+ }
+ return (listmp_sharedmemory_handle) handle;
+}
+
+/*
+ * ======== listmp_sharedmemory_delete ========
+ * Purpose:
+ * Deletes an instance of a listmp_sharedmemory object.
+ */
+int listmp_sharedmemory_delete(listmp_sharedmemory_handle *listmp_handleptr)
+{
+ int status = 0;
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ listmp_sharedmemory_params *params = NULL;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(listmp_handleptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(*listmp_handleptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (listmp_sharedmemory_object *) (*listmp_handleptr);
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+ params = (listmp_sharedmemory_params *) &obj->params;
+
+ if (obj->owner->proc_id != multiproc_get_id(NULL)) {
+ status = -EPERM;
+ goto exit;
+ }
+
+ /* The instance can only be deleted while exactly one open (the
+ * creator's) is outstanding. */
+ if (obj->owner->open_count != 1) {
+ status = -EBUSY;
+ goto exit;
+ }
+
+ /* Remove from the local list */
+ status = mutex_lock_interruptible(
+ listmp_sharedmemory_state.local_gate);
+ if (status)
+ goto exit;
+ list_del(&obj->list_elem);
+ mutex_unlock(listmp_sharedmemory_state.local_gate);
+
+ if (likely(params->name != NULL)) {
+ /* Free memory for the name */
+ kfree(params->name);
+ /* Remove from the name server */
+ if (likely(listmp_sharedmemory_state.cfg.use_name_server)) {
+ if (obj->ns_key != NULL) {
+ nameserver_remove_entry(
+ listmp_sharedmemory_state.ns_handle,
+ obj->ns_key);
+ obj->ns_key = NULL;
+ }
+ }
+ }
+
+ /* Free memory for the processor information */
+ kfree(obj->owner);
+ /* Now free the internal object */
+ kfree(obj);
+ obj = NULL;
+
+ /* Now free the handle */
+ kfree(handle);
+ handle = NULL;
+ *listmp_handleptr = NULL;
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_sharedmemory_delete failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+
+}
+
+
+/*
+ * ======== listmp_sharedmemory_shared_memreq ========
+ * Purpose:
+ * Function to return the amount of shared memory required for creation of
+ * each instance.
+ */
+int listmp_sharedmemory_shared_memreq(listmp_sharedmemory_params *params)
+{
+ int retval = 0;
+
+ if (WARN_ON(params == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (params->list_type == listmp_type_SHARED)
+ retval = (LISTMP_SHAREDMEMORY_CACHESIZE * 2);
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "listmp_sharedmemory_shared_memreq failed! "
+ "retval = 0x%x\n", retval);
+ }
+ /*! @retval -EINVAL if params is NULL */
+ /*! @retval (2 * LISTMP_SHAREDMEMORY_CACHESIZE) for a shared list */
+ /*! @retval 0 for any other list type */
+ return retval;
+}
+
+/*
+ * ======== listmp_sharedmemory_open ========
+ * Purpose:
+ * Function to open a listmp_sharedmemory instance
+ */
+int listmp_sharedmemory_open(listmp_sharedmemory_handle *listmp_handleptr,
+ listmp_sharedmemory_params *params)
+{
+ int status = 0;
+ bool done_flag = false;
+ struct list_head *elem;
+ u32 shared_shm_base;
+ struct listmp_attrs *attrs;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(listmp_handleptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(params == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (WARN_ON((listmp_sharedmemory_state.cfg.use_name_server == false)
+ && (params->shared_addr == (u32)NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (WARN_ON((listmp_sharedmemory_state.cfg.use_name_server == true)
+ && (params->shared_addr == (u32)NULL)
+ && (params->name == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* First check in the local list */
+ list_for_each(elem, &listmp_sharedmemory_state.obj_list) {
+ if (((struct listmp_sharedmemory_obj *)elem)->params.shared_addr
+ == params->shared_addr) {
+ status = mutex_lock_interruptible(
+ listmp_sharedmemory_state.local_gate);
+ if (status)
+ goto exit;
+ if (((struct listmp_sharedmemory_obj *)elem)
+ ->owner->proc_id
+ == multiproc_get_id(NULL))
+ ((struct listmp_sharedmemory_obj *)elem)
+ ->owner->open_count++;
+ mutex_unlock(
+ listmp_sharedmemory_state.local_gate);
+ *listmp_handleptr = \
+ (((struct listmp_sharedmemory_obj *)
+ elem)->top);
+ done_flag = true;
+ break;
+ } else if ((params->name != NULL)
+ && (((struct listmp_sharedmemory_obj *)elem) \
+ ->params.name != NULL)){
+ if (strcmp(((struct listmp_sharedmemory_obj *)elem)
+ ->params.name, params->name) == 0) {
+ status = mutex_lock_interruptible(
+ listmp_sharedmemory_state.local_gate);
+ if (status)
+ goto exit;
+ if (((struct listmp_sharedmemory_obj *)elem)
+ ->owner->proc_id
+ == multiproc_get_id(NULL))
+ ((struct listmp_sharedmemory_obj *)elem)
+ ->owner->open_count++;
+ mutex_unlock(
+ listmp_sharedmemory_state.local_gate);
+ *listmp_handleptr = \
+ (((struct listmp_sharedmemory_obj *)
+ elem)->top);
+ done_flag = true;
+ break;
+ }
+ }
+ }
+
+ if (likely(done_flag == false)) {
+ if (unlikely(params->shared_addr == NULL)) {
+ if (likely(listmp_sharedmemory_state.cfg.use_name_server
+ == true)){
+ /* Find in name server */
+ status =
+ nameserver_get(
+ listmp_sharedmemory_state.ns_handle,
+ params->name,
+ &shared_shm_base,
+ sizeof(u32),
+ NULL);
+ if (status >= 0)
+ params->shared_addr =
+ sharedregion_get_ptr(
+ (u32 *)shared_shm_base);
+ if (params->shared_addr == NULL) {
+ status =
+ LISTMPSHAREDMEMORY_E_NOTCREATED;
+ goto exit;
+ }
+ }
+ }
+ }
+
+ if (status >= 0) {
+ attrs = (struct listmp_attrs *) (params->shared_addr);
+ if (unlikely(attrs->status != (LISTMP_SHAREDMEMORY_CREATED)))
+ status = LISTMPSHAREDMEMORY_E_NOTCREATED;
+ else if (unlikely(attrs->version !=
+ (LISTMP_SHAREDMEMORY_VERSION)))
+ status = LISTMPSHAREDMEMORY_E_VERSION;
+ }
+
+ if (likely(status >= 0))
+ *listmp_handleptr = (listmp_sharedmemory_handle)
+ _listmp_sharedmemory_create(params, false);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_sharedmemory_open failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+
+/*
+ * ======== listmp_sharedmemory_close ========
+ * Purpose:
+ * Function to close a previously opened instance
+ */
+int listmp_sharedmemory_close(listmp_sharedmemory_handle listmp_handle)
+{
+ int status = 0;
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ listmp_sharedmemory_params *params = NULL;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(listmp_handle == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (listmp_sharedmemory_object *)listmp_handle;
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+ if (obj == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(
+ listmp_sharedmemory_state.local_gate);
+ if (status)
+ goto exit;
+ if (obj->owner->proc_id == multiproc_get_id(NULL))
+ (obj)->owner->open_count--;
+
+ params = (listmp_sharedmemory_params *) &obj->params;
+ /* If this instance was opened (not created) locally, free the
+ * local bookkeeping */
+ if (obj->owner->creator == false) {
+ list_del(&obj->list_elem);
+ /* remove from the name server */
+ if (params->name != NULL)
+ /* Free memory for the name */
+ kfree(params->name);
+
+ kfree(obj->owner);
+ kfree(obj);
+ obj = NULL;
+ kfree(handle);
+ handle = NULL;
+ }
+
+ mutex_unlock(listmp_sharedmemory_state.local_gate);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_sharedmemory_close failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+
+/*
+ * ======== listmp_sharedmemory_empty ========
+ * Purpose:
+ * Function to check if the shared memory list is empty
+ */
+bool listmp_sharedmemory_empty(listmp_sharedmemory_handle listmp_handle)
+{
+ int status = 0;
+ bool is_empty = false;
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ s32 retval = 0;
+ struct listmp_elem *sharedHead;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(listmp_handle == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (listmp_sharedmemory_object *)listmp_handle;
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+
+ if (obj->params.gate != NULL) {
+ retval = gatepeterson_enter(obj->params.gate);
+ if (retval < 0) {
+ status = -EINVAL;
+ goto exit;
+ }
+ }
+
+ /*! @retval true if list is empty */
+ sharedHead = (struct listmp_elem *)(sharedregion_get_srptr(
+ (void *)obj->listmp_elem, obj->index));
+ dsb();
+ if (obj->listmp_elem->next == sharedHead)
+ is_empty = true;
+
+ if (obj->params.gate != NULL)
+ gatepeterson_leave(obj->params.gate, 0);
+
+exit:
+ return is_empty;
+}
+
+/*
+ * ======== listmp_sharedmemory_get_head ========
+ * Purpose:
+ * Function to get head element from a shared memory list
+ */
+void *listmp_sharedmemory_get_head(listmp_sharedmemory_handle listmp_handle)
+{
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ struct listmp_elem *elem = NULL;
+ struct listmp_elem *localHeadNext = NULL;
+ struct listmp_elem *localNext = NULL;
+ struct listmp_elem *sharedHead = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true)
+ goto exit;
+
+ if (WARN_ON(listmp_handle == NULL)) {
+ /*! @retval NULL if listmp_handle passed is NULL */
+ elem = NULL;
+ goto exit;
+ }
+
+ handle = (listmp_sharedmemory_object *)listmp_handle;
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+
+ if (obj->params.gate != NULL) {
+ retval = gatepeterson_enter(obj->params.gate);
+ if (retval < 0)
+ goto exit;
+ }
+
+ localHeadNext = sharedregion_get_ptr((u32 *)obj->listmp_elem->next);
+ dsb();
+ /* See if the listmp_sharedmemory_object was empty */
+ if (localHeadNext == (struct listmp_elem *)obj->listmp_elem) {
+ /*! @retval NULL if list is empty */
+ elem = NULL ;
+ } else {
+ /* Elem to return */
+ elem = localHeadNext;
+ dsb();
+ localNext = sharedregion_get_ptr((u32 *)elem->next);
+ sharedHead = (struct listmp_elem *) sharedregion_get_srptr(
+ (void *)obj->listmp_elem, obj->index);
+
+ /* Fix the head of the list next pointer */
+ obj->listmp_elem->next = elem->next;
+ dsb();
+ /* Fix the prev pointer of the new first elem on the
+ list */
+ localNext->prev = sharedHead;
+ }
+
+ if (obj->params.gate != NULL)
+ gatepeterson_leave(obj->params.gate, 0);
+
+exit:
+ return elem;
+}
+
+/*
+ * ======== listmp_sharedmemory_get_tail ========
+ * Purpose:
+ * Function to get tail element from a shared memory list
+ */
+void *listmp_sharedmemory_get_tail(listmp_sharedmemory_handle listmp_handle)
+{
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ struct listmp_elem *elem = NULL;
+ struct listmp_elem *localHeadNext = NULL;
+ struct listmp_elem *localHeadPrev = NULL;
+ struct listmp_elem *localPrev = NULL;
+ struct listmp_elem *sharedHead = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true)
+ goto exit;
+
+ if (WARN_ON(listmp_sharedmemory_state.ns_handle == NULL)) {
+ /*! @retval NULL if Module was not initialized */
+ elem = NULL;
+ goto exit;
+ }
+ if (WARN_ON(listmp_handle == NULL)) {
+ /*! @retval NULL if listmp_handle passed is NULL */
+ elem = NULL;
+ goto exit;
+ }
+
+ handle = (listmp_sharedmemory_object *)listmp_handle;
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+
+ if (obj->params.gate != NULL) {
+ retval = gatepeterson_enter(obj->params.gate);
+ if (retval < 0)
+ goto exit;
+ }
+
+ localHeadNext = sharedregion_get_ptr((u32 *)obj->listmp_elem->next);
+ localHeadPrev = sharedregion_get_ptr((u32 *)obj->listmp_elem->prev);
+
+ /* See if the listmp_sharedmemory_object was empty */
+ if (localHeadNext == (struct listmp_elem *)obj->listmp_elem) {
+ /* Empty, return NULL */
+ elem = NULL ;
+ } else {
+ /* Elem to return */
+ elem = localHeadPrev;
+ localPrev = sharedregion_get_ptr((u32 *)elem->prev);
+ sharedHead = (struct listmp_elem *) sharedregion_get_srptr(
+ (void *)obj->listmp_elem, obj->index);
+ obj->listmp_elem->prev = elem->prev;
+ localPrev->next = sharedHead;
+ }
+
+ if (obj->params.gate != NULL)
+ gatepeterson_leave(obj->params.gate, 0);
+
+exit:
+ return elem;
+}
+
+/*
+ * ======== listmp_sharedmemory_put_head ========
+ * Purpose:
+ * Function to put head element into a shared memory list
+ */
+int listmp_sharedmemory_put_head(listmp_sharedmemory_handle listmp_handle,
+ struct listmp_elem *elem)
+{
+ int status = 0;
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ struct listmp_elem *localNextElem = NULL;
+ struct listmp_elem *sharedElem = NULL;
+ struct listmp_elem *sharedHead = NULL;
+ s32 retval = 0;
+ u32 index;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(listmp_handle == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(elem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (listmp_sharedmemory_object *)listmp_handle;
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+ dsb();
+ index = sharedregion_get_index(elem);
+ sharedElem = (struct listmp_elem *) sharedregion_get_srptr(elem, index);
+ sharedHead = (struct listmp_elem *)sharedregion_get_srptr(
+ (void *)obj->listmp_elem, obj->index);
+ dsb();
+ if (obj->params.gate != NULL) {
+ retval = gatepeterson_enter(obj->params.gate);
+ if (retval < 0) {
+ status = -EINVAL;
+ goto exit;
+ }
+ }
+ /* Add the new elem into the list */
+ elem->next = obj->listmp_elem->next;
+ elem->prev = sharedHead;
+ dsb();
+ localNextElem = sharedregion_get_ptr((u32 *)elem->next);
+ localNextElem->prev = sharedElem;
+ obj->listmp_elem->next = sharedElem;
+
+ if (obj->params.gate != NULL)
+ gatepeterson_leave(obj->params.gate, 0);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_sharedmemory_put_head failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+
+/*
+ * ======== listmp_sharedmemory_put_tail ========
+ * Purpose:
+ * Function to put tail element into a shared memory list
+ */
+int listmp_sharedmemory_put_tail(listmp_sharedmemory_handle listmp_handle,
+ struct listmp_elem *elem)
+{
+ int status = 0;
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ struct listmp_elem *localPrevElem = NULL;
+ struct listmp_elem *sharedElem = NULL;
+ struct listmp_elem *sharedHead = NULL;
+ s32 retval = 0;
+ u32 index;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(listmp_handle == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(elem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (listmp_sharedmemory_object *)listmp_handle;
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+ dsb();
+ /* Safe to do outside the gate */
+ index = sharedregion_get_index(elem);
+ sharedElem = (struct listmp_elem *)
+ sharedregion_get_srptr(elem, index);
+ sharedHead = (struct listmp_elem *)sharedregion_get_srptr
+ ((void *)obj->listmp_elem,
+ obj->index);
+
+ if (obj->params.gate != NULL) {
+ retval = gatepeterson_enter(obj->params.gate);
+ if (retval < 0) {
+ status = -EINVAL;
+ goto exit;
+ }
+ }
+ /* Add the new elem into the list */
+ elem->next = sharedHead;
+ elem->prev = obj->listmp_elem->prev;
+ dsb();
+ localPrevElem = sharedregion_get_ptr((u32 *)elem->prev);
+ dsb();
+ localPrevElem->next = sharedElem;
+ obj->listmp_elem->prev = sharedElem;
+
+ if (obj->params.gate != NULL)
+ gatepeterson_leave(obj->params.gate, 0);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_sharedmemory_put_tail failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+
+/*
+ * ======== listmp_sharedmemory_insert ========
+ * Purpose:
+ * Function to insert an element into a shared memory list
+ */
+int listmp_sharedmemory_insert(listmp_sharedmemory_handle listmp_handle,
+ struct listmp_elem *new_elem,
+ struct listmp_elem *cur_elem)
+{
+ int status = 0;
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ struct listmp_elem *localPrevElem = NULL;
+ struct listmp_elem *sharedNewElem = NULL;
+ struct listmp_elem *sharedCurElem = NULL;
+ s32 retval = 0;
+ u32 index;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(new_elem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (listmp_sharedmemory_object *)listmp_handle;
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+
+ if (obj->params.gate != NULL) {
+ retval = gatepeterson_enter(obj->params.gate);
+ if (retval < 0) {
+ status = -EINVAL;
+ goto exit;
+ }
+ }
+
+ /* If cur_elem is NULL, insert in front of the current first element */
+ if (cur_elem == NULL)
+ cur_elem = (struct listmp_elem *)obj->listmp_elem->next;
+
+ /* Get SRPtr for new_elem */
+ index = sharedregion_get_index(new_elem);
+ sharedNewElem = (struct listmp_elem *)
+ sharedregion_get_srptr(new_elem, index);
+ dsb();
+ /* Get SRPtr for cur_elem */
+ index = sharedregion_get_index(cur_elem);
+ sharedCurElem = (struct listmp_elem *)
+ sharedregion_get_srptr(cur_elem, index);
+
+ /* Get SRPtr for cur_elem->prev */
+ localPrevElem = sharedregion_get_ptr((u32 *)cur_elem->prev);
+ dsb();
+ new_elem->next = sharedCurElem;
+ new_elem->prev = cur_elem->prev;
+ localPrevElem->next = sharedNewElem;
+ dsb();
+ cur_elem->prev = sharedNewElem;
+
+ if (obj->params.gate != NULL)
+ gatepeterson_leave(obj->params.gate, 0);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_sharedmemory_insert failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+
+/*
+ * ======== listmp_sharedmemory_remove ========
+ * Purpose:
+ * Function to remove an element from a shared memory list
+ */
+int listmp_sharedmemory_remove(listmp_sharedmemory_handle listmp_handle,
+ struct listmp_elem *elem)
+{
+ int status = 0;
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ struct listmp_elem *localPrevElem = NULL;
+ struct listmp_elem *localNextElem = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(elem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (listmp_sharedmemory_object *)listmp_handle;
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+
+ if (obj->params.gate != NULL) {
+ retval = gatepeterson_enter(obj->params.gate);
+ if (retval < 0) {
+ status = -EINVAL;
+ goto exit;
+ }
+ }
+
+ localPrevElem = sharedregion_get_ptr((u32 *)elem->prev);
+ localNextElem = sharedregion_get_ptr((u32 *)elem->next);
+ dsb();
+ localPrevElem->next = elem->next;
+ localNextElem->prev = elem->prev;
+
+ if (obj->params.gate != NULL)
+ gatepeterson_leave(obj->params.gate, 0);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_sharedmemory_remove failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+
+/*
+ * ======== listmp_sharedmemory_next ========
+ * Purpose:
+ * Function to traverse to next element in shared memory list
+ */
+void *listmp_sharedmemory_next(listmp_sharedmemory_handle listmp_handle,
+ struct listmp_elem *elem)
+{
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ struct listmp_elem *retElem = NULL;
+ struct listmp_elem *sharedNextElem = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true)
+ goto exit;
+
+ handle = (listmp_sharedmemory_object *)listmp_handle;
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+
+ if (obj->params.gate != NULL) {
+ retval = gatepeterson_enter(obj->params.gate);
+ if (retval < 0)
+ goto exit;
+ }
+
+ /* If element is NULL start at head */
+ if (elem == NULL)
+ sharedNextElem = (struct listmp_elem *) obj->listmp_elem->next;
+ else
+ sharedNextElem = (struct listmp_elem *)elem->next;
+
+ retElem = sharedregion_get_ptr((u32 *)sharedNextElem);
+
+ /*! @retval NULL if list is empty */
+ if (retElem == (struct listmp_elem *)obj->listmp_elem)
+ retElem = NULL;
+ if (obj->params.gate != NULL)
+ gatepeterson_leave(obj->params.gate, 0);
+
+exit:
+ return retElem;
+}
+
+/*
+ * ======== listmp_sharedmemory_prev ========
+ * Purpose:
+ * Function to traverse to prev element in shared memory list
+ */
+void *listmp_sharedmemory_prev(listmp_sharedmemory_handle listmp_handle,
+ struct listmp_elem *elem)
+{
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ struct listmp_elem *retElem = NULL;
+ struct listmp_elem *sharedPrevElem = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(listmp_sharedmemory_state.ref_count),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(0),
+ LISTMPSHAREDMEMORY_MAKE_MAGICSTAMP(1)) == true)
+ goto exit;
+
+ handle = (listmp_sharedmemory_object *)listmp_handle;
+ obj = (struct listmp_sharedmemory_obj *) handle->obj;
+
+ if (obj->params.gate != NULL) {
+ retval = gatepeterson_enter(obj->params.gate);
+ if (retval < 0)
+ goto exit;
+ }
+
+ /* If elem is NULL use head */
+ if (elem == NULL)
+ sharedPrevElem = (struct listmp_elem *)
+ obj->listmp_elem->prev;
+ else
+ sharedPrevElem = (struct listmp_elem *)elem->prev;
+
+ retElem = sharedregion_get_ptr((u32 *)sharedPrevElem);
+
+ /*! @retval NULL if list is empty */
+ if (retElem == (struct listmp_elem *)(obj->listmp_elem))
+ retElem = NULL;
+
+ if (obj->params.gate != NULL)
+ gatepeterson_leave(obj->params.gate, 0);
+
+exit:
+ return retElem;
+}
+
+/*
+ * ======== _listmp_sharedmemory_create ========
+ * Purpose:
+ * Creates a new instance of the listmp_sharedmemory module. This is an
+ * internal function because both listmp_sharedmemory_create and
+ * listmp_sharedmemory_open use the same functionality.
+ */
+static listmp_sharedmemory_handle _listmp_sharedmemory_create(
+ listmp_sharedmemory_params *params, u32 create_flag)
+{
+ int status = 0;
+ listmp_sharedmemory_object *handle = NULL;
+ struct listmp_sharedmemory_obj *obj = NULL;
+ u32 key;
+ u32 shm_index;
+ u32 *shared_shm_base;
+
+ BUG_ON(params == NULL);
+
+ /* Allow local lock not being provided. Don't do protection if local
+ * lock is not provided.
+ */
+ /* Create the handle */
+ handle = kzalloc(sizeof(listmp_sharedmemory_object), GFP_KERNEL);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ obj = kzalloc(sizeof(struct listmp_sharedmemory_obj),
+ GFP_KERNEL);
+ if (obj == NULL) {
+ /*! @retval NULL if memory allocation failed for the internal
+ * object of type struct listmp_sharedmemory_obj */
+ status = -ENOMEM;
+ kfree(handle);
+ handle = NULL;
+ goto exit;
+ }
+
+ handle->obj = (struct listmp_sharedmemory_obj *)obj ;
+ handle->empty = &listmp_sharedmemory_empty;
+ handle->get_head = &listmp_sharedmemory_get_head;
+ handle->get_tail = &listmp_sharedmemory_get_tail;
+ handle->put_head = &listmp_sharedmemory_put_head;
+ handle->put_tail = &listmp_sharedmemory_put_tail;
+ handle->insert = &listmp_sharedmemory_insert;
+ handle->remove = &listmp_sharedmemory_remove;
+ handle->next = &listmp_sharedmemory_next;
+ handle->prev = &listmp_sharedmemory_prev;
+
+ /* Update attrs */
+ obj->attrs = (struct listmp_attrs *) params->shared_addr;
+ /* Assign the memory with proper cache line padding */
+ obj->listmp_elem = (void *) ((u32)obj->attrs + \
+ LISTMP_SHAREDMEMORY_CACHESIZE);
+ obj->index = sharedregion_get_index(params->shared_addr);
+
+ if (create_flag == true) {
+ obj->attrs->shared_addr_size = params->shared_addr_size;
+ obj->attrs->version = LISTMP_SHAREDMEMORY_VERSION;
+ obj->listmp_elem->next = obj->listmp_elem->prev =
+ (struct listmp_elem *)
+ (sharedregion_get_srptr((void *)obj->listmp_elem,
+ obj->index));
+ }
+
+ /* Populate the params member */
+ memcpy((void *)&obj->params, (void *)params,
+ sizeof(listmp_sharedmemory_params));
+
+ if (likely(listmp_sharedmemory_state.cfg.use_name_server
+ == true)) {
+ if (obj->params.name != NULL) {
+ /* Copy the name */
+ obj->params.name = kmalloc(strlen(params->name) + 1,
+ GFP_KERNEL);
+
+ if (obj->params.name == NULL) {
+ /*! @retval NULL if Memory allocation failed for
+ name */
+ status = -ENOMEM;
+ } else {
+ strncpy(obj->params.name, params->name,
+ strlen(params->name) + 1);
+ }
+ }
+ }
+
+ /* Update processor information */
+ obj->owner = kmalloc(sizeof(struct listmp_proc_attrs),
+ GFP_KERNEL);
+ if (obj->owner == NULL) {
+ printk(KERN_ERR "_listmp_sharedmemory_create: Memory "
+ "allocation failed for processor information!\n");
+ status = -ENOMEM;
+ } else {
+ /* Update owner and opener details */
+ if (create_flag == true) {
+ obj->owner->creator = true;
+ obj->owner->open_count = 1;
+ obj->owner->proc_id = multiproc_get_id(NULL);
+ obj->top = handle;
+ obj->attrs->status = \
+ LISTMP_SHAREDMEMORY_CREATED;
+ } else {
+ obj->owner->creator = false;
+ obj->owner->open_count = 0;
+ obj->owner->proc_id = MULTIPROC_INVALIDID;
+ obj->top = handle;
+ }
+
+ /* Put in the local list */
+ key = mutex_lock_interruptible(
+ listmp_sharedmemory_state.local_gate);
+ INIT_LIST_HEAD(&obj->list_elem);
+ list_add_tail((&obj->list_elem),
+ &listmp_sharedmemory_state.obj_list);
+ mutex_unlock(listmp_sharedmemory_state.local_gate);
+
+ if (create_flag == true) {
+
+ /* We will store a shared pointer in the
+ * NameServer
+ */
+ shm_index = sharedregion_get_index(params->shared_addr);
+ shared_shm_base = sharedregion_get_srptr(
+ params->shared_addr, shm_index);
+ /* Add list instance to name server */
+ if (obj->params.name != NULL) {
+ obj->ns_key = nameserver_add_uint32(
+ listmp_sharedmemory_state.ns_handle,
+ params->name,
+ (u32) (shared_shm_base));
+ if (obj->ns_key == NULL)
+ status = -EFAULT;
+ }
+ }
+ }
+
+ if (status < 0) {
+ /* Remove from the local list */
+ key = mutex_lock_interruptible(
+ listmp_sharedmemory_state.local_gate);
+ list_del(&obj->list_elem);
+ mutex_unlock(listmp_sharedmemory_state.local_gate);
+
+ if (likely(listmp_sharedmemory_state.cfg.use_name_server
+ == true)) {
+ if ((obj->params.name != NULL) && (status != -EFAULT))
+ nameserver_remove_entry(
+ listmp_sharedmemory_state.ns_handle,
+ obj->ns_key);
+ }
+ if (obj->owner != NULL)
+ kfree(obj->owner);
+
+ if (obj->params.name != NULL)
+ kfree(obj->params.name);
+
+ if (obj != NULL) {
+ kfree(obj);
+ obj = NULL;
+ }
+ if (handle != NULL) {
+ kfree(handle);
+ handle = NULL;
+ }
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "_listmp_sharedmemory_create failed! "
+ "status = 0x%x\n", status);
+ }
+ return (listmp_sharedmemory_handle) handle;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/listmp_sharedmemory_ioctl.c b/drivers/dsp/syslink/multicore_ipc/listmp_sharedmemory_ioctl.c
new file mode 100644
index 000000000000..292881665788
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/listmp_sharedmemory_ioctl.c
@@ -0,0 +1,701 @@
+/*
+ * listmp_sharedmemory_ioctl.c
+ *
+ * This file implements all the ioctl operations required on the
+ * listmp_sharedmemory module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+/* Standard headers */
+#include <linux/types.h>
+
+/* Linux headers */
+#include <linux/uaccess.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+/* Module Headers */
+#include <listmp.h>
+#include <listmp_sharedmemory.h>
+#include <listmp_sharedmemory_ioctl.h>
+#include <sharedregion.h>
+
+/*
+ * ======== listmp_sharedmemory_ioctl_get_config ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_get_config function
+ */
+static inline int listmp_sharedmemory_ioctl_get_config(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ struct listmp_config config;
+
+ status = listmp_sharedmemory_get_config(&config);
+ if (unlikely(status))
+ goto exit;
+
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct listmp_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_setup ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_setup function
+ */
+static inline int listmp_sharedmemory_ioctl_setup(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ struct listmp_config config;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct listmp_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ status = listmp_sharedmemory_setup(&config);
+ cargs->api_status = status;
+exit:
+ return retval;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_destroy ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_destroy function
+ */
+static inline int listmp_sharedmemory_ioctl_destroy(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ cargs->api_status = listmp_sharedmemory_destroy();
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_params_init ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_params_init function
+ */
+static inline int listmp_sharedmemory_ioctl_params_init(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ listmp_sharedmemory_params params;
+
+ size = copy_from_user(&params,
+ cargs->args.params_init.params,
+ sizeof(listmp_sharedmemory_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ listmp_sharedmemory_params_init(
+ cargs->args.params_init.listmp_handle, &params);
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(listmp_sharedmemory_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ cargs->api_status = status;
+ return retval;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_create ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_create function
+ */
+static inline int listmp_sharedmemory_ioctl_create(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ listmp_sharedmemory_params params;
+
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(listmp_sharedmemory_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ /* Allocate memory for the name */
+ if (cargs->args.create.name_len > 0) {
+ params.name = kmalloc(cargs->args.create.name_len, GFP_KERNEL);
+ if (params.name == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ /* Copy the name */
+ size = copy_from_user(params.name,
+ cargs->args.create.params->name,
+ cargs->args.create.name_len);
+ if (size) {
+ retval = -EFAULT;
+ goto free_name;
+ }
+ }
+
+ params.shared_addr = sharedregion_get_ptr(
+ (u32 *)cargs->args.create.shared_addr_srptr);
+ if (unlikely(params.shared_addr == NULL))
+ goto free_name;
+
+ /* Update gate in params. */
+ params.gate = cargs->args.create.knl_gate;
+ cargs->args.create.listmp_handle = listmp_sharedmemory_create(&params);
+
+ size = copy_to_user(cargs->args.create.params, &params,
+ sizeof(listmp_sharedmemory_params));
+ if (!size)
+ goto free_name;
+
+ /* Error copying, so delete the handle */
+ retval = -EFAULT;
+ if (cargs->args.create.listmp_handle)
+ listmp_sharedmemory_delete(&cargs->args.create.listmp_handle);
+
+free_name:
+ if (cargs->args.create.name_len > 0)
+ kfree(params.name);
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_delete ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_delete function
+ */
+static inline int listmp_sharedmemory_ioctl_delete(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ cargs->api_status = listmp_sharedmemory_delete(
+ &(cargs->args.delete_listmp.listmp_handle));
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_open ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_open function
+ */
+static inline int listmp_sharedmemory_ioctl_open(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ listmp_sharedmemory_params params;
+ void *listmp_handle = NULL;
+
+ if (unlikely(cargs->args.open.params == NULL)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ size = copy_from_user(&params, cargs->args.open.params,
+ sizeof(listmp_sharedmemory_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ if ((cargs->args.open.name_len > 0) &&
+ (cargs->args.open.params->name != NULL)) {
+ params.name = kmalloc(cargs->args.open.name_len, GFP_KERNEL);
+ if (params.name == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ /* Copy the name */
+ size = copy_from_user(params.name,
+ cargs->args.open.params->name,
+ cargs->args.open.name_len);
+ if (size) {
+ retval = -EFAULT;
+ goto free_name;
+ }
+ }
+
+ /* For open by name, the shared_add_srptr may be invalid */
+ if (cargs->args.open.shared_addr_srptr != \
+ (u32)SHAREDREGION_INVALIDSRPTR) {
+ params.shared_addr = sharedregion_get_ptr(
+ (u32 *)cargs->args.open.shared_addr_srptr);
+ }
+ if (unlikely(params.shared_addr == NULL))
+ goto free_name;
+
+ /* Update gate in params. */
+ params.gate = cargs->args.open.knl_gate;
+ status = listmp_sharedmemory_open(&listmp_handle, &params);
+ if (status < 0)
+ goto free_name;
+ cargs->args.open.listmp_handle = listmp_handle;
+
+ size = copy_to_user(cargs->args.open.params, &params,
+ sizeof(listmp_sharedmemory_params));
+ if (!size)
+ goto free_name;
+
+ /* Error copying, so close the handle */
+ retval = -EFAULT;
+ if (cargs->args.open.listmp_handle) {
+ listmp_sharedmemory_close(cargs->args.open.listmp_handle);
+ cargs->args.open.listmp_handle = NULL;
+ }
+
+free_name:
+ if (cargs->args.open.name_len > 0)
+ kfree(params.name);
+
+ cargs->api_status = status;
+exit:
+ return retval;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_close ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_close function
+ */
+static inline int listmp_sharedmemory_ioctl_close(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ cargs->api_status = \
+ listmp_sharedmemory_close(cargs->args.close.listmp_handle);
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_empty ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_empty function
+ */
+static inline int listmp_sharedmemory_ioctl_empty(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ cargs->args.is_empty.is_empty = \
+ listmp_sharedmemory_empty(cargs->args.is_empty.listmp_handle);
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_get_head ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_get_head function
+ */
+static inline int listmp_sharedmemory_ioctl_get_head(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ struct listmp_elem *elem;
+ u32 *elem_srptr = SHAREDREGION_INVALIDSRPTR;
+ int index;
+
+ cargs->api_status = LISTMPSHAREDMEMORY_E_FAIL;
+
+ elem = listmp_sharedmemory_get_head(cargs->args.get_head.listmp_handle);
+ if (unlikely(elem == NULL))
+ goto exit;
+
+ index = sharedregion_get_index(elem);
+ if (unlikely(index < 0))
+ goto exit;
+
+ elem_srptr = sharedregion_get_srptr((void *)elem, index);
+
+ cargs->api_status = 0;
+exit:
+ cargs->args.get_head.elem_srptr = elem_srptr;
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_get_tail ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_get_tail function
+ */
+static inline int listmp_sharedmemory_ioctl_get_tail(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ struct listmp_elem *elem;
+ u32 *elem_srptr = SHAREDREGION_INVALIDSRPTR;
+ int index;
+
+ cargs->api_status = LISTMPSHAREDMEMORY_E_FAIL;
+
+ elem = listmp_sharedmemory_get_tail(cargs->args.get_tail.listmp_handle);
+ if (unlikely(elem == NULL))
+ goto exit;
+
+ index = sharedregion_get_index(elem);
+ if (unlikely(index < 0))
+ goto exit;
+
+ elem_srptr = sharedregion_get_srptr((void *)elem, index);
+
+ cargs->api_status = 0;
+exit:
+ cargs->args.get_tail.elem_srptr = elem_srptr;
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_put_head ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_put_head function
+ */
+static inline int listmp_sharedmemory_ioctl_put_head(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ struct listmp_elem *elem;
+
+ cargs->api_status = LISTMPSHAREDMEMORY_E_FAIL;
+
+ elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.put_head.elem_srptr);
+ if (unlikely(elem == NULL))
+ goto exit;
+
+ cargs->api_status = listmp_sharedmemory_put_head(
+ cargs->args.put_head.listmp_handle, elem);
+exit:
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_put_tail ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_put_tail function
+ */
+static inline int listmp_sharedmemory_ioctl_put_tail(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ struct listmp_elem *elem;
+
+ cargs->api_status = LISTMPSHAREDMEMORY_E_FAIL;
+
+ elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.put_tail.elem_srptr);
+ if (unlikely(elem == NULL))
+ goto exit;
+
+ cargs->api_status = listmp_sharedmemory_put_tail(
+ cargs->args.put_tail.listmp_handle, elem);
+exit:
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_insert ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_insert function
+ */
+static inline int listmp_sharedmemory_ioctl_insert(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ struct listmp_elem *new_elem;
+ struct listmp_elem *cur_elem;
+
+ new_elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.insert.new_elem_srptr);
+ if (unlikely(new_elem == NULL))
+ goto exit;
+
+ cur_elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.insert.cur_elem_srptr);
+ if (unlikely(cur_elem == NULL))
+ goto exit;
+
+ cargs->api_status = listmp_sharedmemory_insert(
+ cargs->args.insert.listmp_handle, new_elem, cur_elem);
+exit:
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_remove ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_remove function
+ */
+static inline int listmp_sharedmemory_ioctl_remove(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ struct listmp_elem *elem;
+
+ elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.remove.elem_srptr);
+ if (unlikely(elem == NULL))
+ goto exit;
+
+ cargs->api_status = listmp_sharedmemory_remove(
+ cargs->args.remove.listmp_handle, elem);
+
+exit:
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_next ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_next function
+ */
+static inline int listmp_sharedmemory_ioctl_next(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ struct listmp_elem *elem = NULL;
+ struct listmp_elem *ret_elem = NULL;
+ u32 *next_elem_srptr = SHAREDREGION_INVALIDSRPTR;
+ int index;
+
+ if (cargs->args.next.elem_srptr != NULL) {
+ elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.next.elem_srptr);
+ }
+ ret_elem = (struct listmp_elem *) listmp_sharedmemory_next(
+ cargs->args.next.listmp_handle, elem);
+ if (unlikely(ret_elem == NULL))
+ goto exit;
+
+ index = sharedregion_get_index(ret_elem);
+ if (unlikely(index < 0))
+ goto exit;
+
+ next_elem_srptr = sharedregion_get_srptr((void *)ret_elem, index);
+
+ cargs->api_status = 0;
+exit:
+ cargs->args.next.next_elem_srptr = next_elem_srptr;
+ return 0;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_prev ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_prev function
+ */
+static inline int listmp_sharedmemory_ioctl_prev(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ struct listmp_elem *elem = NULL;
+ struct listmp_elem *ret_elem = NULL;
+ u32 *prev_elem_srptr = SHAREDREGION_INVALIDSRPTR;
+ int index;
+
+ if (cargs->args.prev.elem_srptr != NULL) {
+ elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.prev.elem_srptr);
+ }
+ ret_elem = (struct listmp_elem *) listmp_sharedmemory_prev(
+ cargs->args.prev.listmp_handle, elem);
+ if (unlikely(ret_elem == NULL))
+ goto exit;
+
+ index = sharedregion_get_index(ret_elem);
+ if (unlikely(index < 0))
+ goto exit;
+
+ prev_elem_srptr = sharedregion_get_srptr((void *)ret_elem, index);
+
+ cargs->api_status = 0;
+exit:
+ cargs->args.prev.prev_elem_srptr = prev_elem_srptr;
+ return 0;
+
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl_shared_memreq ========
+ * Purpose:
+ * This ioctl interface to listmp_sharedmemory_shared_memreq function
+ */
+static inline int listmp_sharedmemory_ioctl_shared_memreq(
+ struct listmp_sharedmemory_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ listmp_sharedmemory_params params;
+
+ size = copy_from_user(&params, cargs->args.shared_memreq.params,
+ sizeof(listmp_sharedmemory_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->args.shared_memreq.bytes =
+ listmp_sharedmemory_shared_memreq(&params);
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/*
+ * ======== listmp_sharedmemory_ioctl ========
+ * Purpose:
+ * ioctl interface function for listmp_sharedmemory module
+ */
+int listmp_sharedmemory_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int os_status = 0;
+ struct listmp_sharedmemory_cmd_args __user *uarg =
+ (struct listmp_sharedmemory_cmd_args __user *)args;
+ struct listmp_sharedmemory_cmd_args cargs;
+ unsigned long size;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ os_status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ os_status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+ if (os_status) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct listmp_sharedmemory_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_LISTMP_SHAREDMEMORY_GETCONFIG:
+ os_status = listmp_sharedmemory_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_SETUP:
+ os_status = listmp_sharedmemory_ioctl_setup(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_DESTROY:
+ os_status = listmp_sharedmemory_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_PARAMS_INIT:
+ os_status = listmp_sharedmemory_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_CREATE:
+ os_status = listmp_sharedmemory_ioctl_create(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_DELETE:
+ os_status = listmp_sharedmemory_ioctl_delete(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_OPEN:
+ os_status = listmp_sharedmemory_ioctl_open(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_CLOSE:
+ os_status = listmp_sharedmemory_ioctl_close(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_ISEMPTY:
+ os_status = listmp_sharedmemory_ioctl_empty(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_GETHEAD:
+ os_status = listmp_sharedmemory_ioctl_get_head(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_GETTAIL:
+ os_status = listmp_sharedmemory_ioctl_get_tail(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_PUTHEAD:
+ os_status = listmp_sharedmemory_ioctl_put_head(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_PUTTAIL:
+ os_status = listmp_sharedmemory_ioctl_put_tail(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_INSERT:
+ os_status = listmp_sharedmemory_ioctl_insert(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_REMOVE:
+ os_status = listmp_sharedmemory_ioctl_remove(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_NEXT:
+ os_status = listmp_sharedmemory_ioctl_next(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_PREV:
+ os_status = listmp_sharedmemory_ioctl_prev(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMORY_SHAREDMEMREQ:
+ os_status = listmp_sharedmemory_ioctl_shared_memreq(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ os_status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ os_status = -ERESTARTSYS;
+
+ if (os_status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs,
+ sizeof(struct listmp_sharedmemory_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+ return os_status;
+
+exit:
+ printk(KERN_ERR "listmp_sharedmemory_ioctl failed: status = 0x%x\n",
+ os_status);
+ return os_status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/messageq.c b/drivers/dsp/syslink/multicore_ipc/messageq.c
new file mode 100755
index 000000000000..cc2eeca21ef5
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/messageq.c
@@ -0,0 +1,1597 @@
+/*
+ * messageq.c
+ *
+ * The messageQ module supports the structured sending and receiving of
+ * variable length messages. This module can be used for homogeneous or
+ * heterogeneous multi-processor messaging.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/*!
+ * MessageQ provides more sophisticated messaging than other modules. It is
+ * typically used for complex situations such as multi-processor messaging.
+ *
+ * The following are key features of the MessageQ module:
+ * -Writers and readers can be relocated to another processor with no
+ * runtime code changes.
+ * -Timeouts are allowed when receiving messages.
+ * -Readers can determine the writer and reply back.
+ * -Receiving a message is deterministic when the timeout is zero.
+ * -Messages can reside on any message queue.
+ * -Supports zero-copy transfers.
+ * -Can send and receive from any type of thread.
+ * -Notification mechanism is specified by application.
+ * -Allows QoS (quality of service) on message buffer pools. For example,
+ * using specific buffer pools for specific message queues.
+ *
+ * Messages are sent and received via a message queue. A reader is a thread
+ * that gets (reads) messages from a message queue. A writer is a thread that
+ * puts (writes) a message to a message queue. Each message queue has one
+ * reader and can have many writers. A thread may read from or write to
+ * multiple message queues.
+ *
+ * Conceptually, the reader thread owns a message queue. The reader thread
+ * creates a message queue. Writer threads open a created message queue to
+ * get access to it.
+ *
+ * Message queues are identified by a system-wide unique name. Internally,
+ * MessageQ uses the NameServer module for managing
+ * these names. The names are used for opening a message queue. Using
+ * names is not required.
+ *
+ * Messages must be allocated from the MessageQ module. Once a message is
+ * allocated, it can be sent on any message queue. Once a message is sent, the
+ * writer loses ownership of the message and should not attempt to modify the
+ * message. Once the reader receives the message, it owns the message. It
+ * may either free the message or re-use the message.
+ *
+ * Messages in a message queue can be of variable length. The only
+ * requirement is that the first field in the definition of a message must be a
+ * MsgHeader structure. For example:
+ * typedef struct MyMsg {
+ * messageq_MsgHeader header;
+ * ...
+ * } MyMsg;
+ *
+ * The MessageQ API uses the messageq_MsgHeader internally. Your application
+ * should not modify or directly access the fields in the messageq_MsgHeader.
+ *
+ * All messages sent via the MessageQ module must be allocated from a
+ * Heap implementation. The heap can be used for
+ * other memory allocation not related to MessageQ.
+ *
+ * An application can use multiple heaps. The purpose of having multiple
+ * heaps is to allow an application to regulate its message usage. For
+ * example, an application can allocate critical messages from one heap of fast
+ * on-chip memory and non-critical messages from another heap of slower
+ * external memory.
+ *
+ * MessageQ also supports messages that are not allocated via the
+ * alloc function. Please refer to the static_msg_init
+ * function description for more details.
+ *
+ * In a multi-processor system, MessageQ communicates with other
+ * processors via IMessageQ_transport instances. There must be one and
+ * only one IMessageQ_transport instance for each remote processor with
+ * which communication is desired.
+ * So on a four-processor system, each processor must have three
+ * IMessageQ_transport instances.
+ *
+ * The user only needs to create the IMessageQ_transport instances. The
+ * instances are responsible for registering themselves with MessageQ.
+ * This is accomplished via the register_transport function.
+ */
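+
+/*
+ * Illustrative usage sketch (editor's addition, not part of this patch): a
+ * minimal reader/writer flow over the APIs implemented below. The queue name
+ * "myQ", the heap id MY_HEAP_ID (assumed to be already registered via
+ * messageq_register_heap) and struct my_msg are hypothetical placeholders.
+ *
+ *	// Reader side: create the queue and block for a message.
+ *	struct messageq_params params;
+ *	messageq_msg msg = NULL;
+ *	void *q;
+ *
+ *	messageq_params_init(NULL, &params);
+ *	q = messageq_create("myQ", &params);
+ *	if (messageq_get(q, &msg, MESSAGEQ_FOREVER) == 0)
+ *		messageq_free(msg);
+ *
+ *	// Writer side: open the queue by name, allocate and send.
+ *	u32 qid;
+ *
+ *	if (messageq_open("myQ", &qid) == 0) {
+ *		msg = messageq_alloc(MY_HEAP_ID, sizeof(struct my_msg));
+ *		if (msg != NULL)
+ *			messageq_put(qid, msg);
+ *	}
+ */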
+
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+
+/* Utilities headers */
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+
+/* Syslink headers */
+#include <syslink/atomic_linux.h>
+
+/* Module level headers */
+#include <nameserver.h>
+#include <multiproc.h>
+#include <messageq_transportshm.h>
+#include <heap.h>
+#include <messageq.h>
+
+
+/* Macro to make a correct module magic number with refCount */
+#define MESSAGEQ_MAKE_MAGICSTAMP(x) ((MESSAGEQ_MODULEID << 12u) | (x))
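+
+/*
+ * Editor's note: MESSAGEQ_MAKE_MAGICSTAMP(n) packs the module id into the
+ * upper bits and the count n into the low 12 bits. The recurring
+ * atomic_cmpmask_and_lt(&ref_count, MESSAGEQ_MAKE_MAGICSTAMP(0),
+ * MESSAGEQ_MAKE_MAGICSTAMP(1)) checks in the APIs below therefore appear to
+ * test whether messageq_setup() has been called at least once before an API
+ * is allowed to proceed.
+ */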
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+/*!
+ * @brief Name of the reserved NameServer used for MessageQ.
+ */
+#define MESSAGEQ_NAMESERVER "MessageQ"
+
+
+/* =============================================================================
+ * Structures & Enums
+ * =============================================================================
+ */
+/* structure for MessageQ module state */
+struct messageq_module_object {
+ atomic_t ref_count;
+ /*!< Reference count */
+ void *ns_handle;
+ /*!< Handle to the local NameServer used for storing GP objects */
+ struct mutex *gate_handle;
+ /*!< Handle of gate to be used for local thread safety */
+ struct messageq_config cfg;
+ /*!< Current config values */
+ struct messageq_config default_cfg;
+ /*!< Default config values */
+ struct messageq_params default_inst_params;
+ /*!< Default instance creation parameters */
+ void *transports[MULTIPROC_MAXPROCESSORS][MESSAGEQ_NUM_PRIORITY_QUEUES];
+ /*!< Transport to be set in messageq_register_transport */
+	void **queues; /*messageq_handle *queues;*/
+	/*!< Array of message queue handles (grown at runtime) */
+	void **heaps; /*Heap_Handle *heaps; */
+	/*!< Heaps to be set in messageq_register_heap */
+	u16 num_queues;
+	/*!< Number of queues */
+	u16 num_heaps;
+	/*!< Number of heaps */
+	bool can_free_queues;
+	/*!< Indicates whether the queues array may be freed when grown */
+};
+
+/*!
+ * @brief Structure for the Handle for the MessageQ.
+ */
+struct messageq_object {
+ struct messageq_params params;
+ /*! Instance specific creation parameters */
+ char *name;
+ /*! MessageQ name */
+ u32 queue;
+ /* Unique id */
+ struct list_head normal_list;
+ /* Embedded List objects */
+ struct list_head high_list;
+ /* Embedded List objects */
+ /*OsalSemaphore_Handle synchronizer;*/
+ struct semaphore *synchronizer;
+ /* Semaphore used for synchronizing message events */
+};
+
+
+static struct messageq_module_object messageq_state = {
+ .ns_handle = NULL,
+ .gate_handle = NULL,
+ .queues = NULL,
+ .heaps = NULL,
+ .num_queues = 1,
+ .num_heaps = 1,
+ .can_free_queues = false,
+ .default_cfg.num_heaps = 1,
+ .default_cfg.max_runtime_entries = 32,
+ .default_cfg.name_table_gate = NULL,
+ .default_cfg.max_name_len = 32,
+ .default_inst_params.reserved = 0
+};
+
+
+/* =============================================================================
+ * Constants
+ * =============================================================================
+ */
+/*
+ * Used to denote a message that was initialized
+ * with the messageq_static_msg_init function.
+ */
+#define MESSAGEQ_STATICMSG 0xFFFF
+
+
+/* =============================================================================
+ * Forward declarations of internal functions
+ * =============================================================================
+ */
+/*
+ * @brief Grow the MessageQ table
+ *
+ * @sa messageq_create
+ */
+static u16 _messageq_grow(struct messageq_object *obj);
+
+/* =============================================================================
+ * APIS
+ * =============================================================================
+ */
+/*
+ * ======== messageq_get_config ========
+ * Purpose:
+ * Function to get the default configuration for the MessageQ
+ * module.
+ *
+ * This function can be called by the application to get its
+ * configuration parameters for messageq_setup filled in by the
+ * MessageQ module with the default parameters. If the user does
+ * not wish to make any change in the default parameters, this API
+ * is not required to be called.
+ */
+void messageq_get_config(struct messageq_config *cfg)
+{
+ if (WARN_ON(cfg == NULL))
+ goto exit;
+
+ if (atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true) {
+ /* (If setup has not yet been called) */
+ memcpy(cfg, &messageq_state.default_cfg,
+ sizeof(struct messageq_config));
+ } else {
+ memcpy(cfg, &messageq_state.cfg,
+ sizeof(struct messageq_config));
+ }
+ return;
+
+exit:
+ printk(KERN_ERR "messageq_get_config: Argument of type "
+ "(struct messageq_config *) passed is null!\n");
+}
+EXPORT_SYMBOL(messageq_get_config);
+
+/*
+ * ======== messageq_setup ========
+ * Purpose:
+ * Function to setup the MessageQ module.
+ *
+ * This function sets up the MessageQ module. This function must
+ * be called before any other instance-level APIs can be invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then messageq_get_config can be called to get the
+ * configuration filled with the default values. After this, only
+ * the required configuration values can be changed. If the user
+ * does not wish to make any change in the default parameters, the
+ * application can simply call messageq_setup with NULL parameters.
+ * The default parameters would then be used automatically.
+ */
+int messageq_setup(const struct messageq_config *cfg)
+{
+ int status = MESSAGEQ_SUCCESS;
+ struct nameserver_params params;
+ struct messageq_config tmpcfg;
+
+	/* This sets the ref_count variable if it is not initialized; the upper
+	 * 16 bits are written with the module id to ensure correctness of the
+	 * ref_count variable.
+	 */
+ atomic_cmpmask_and_set(&messageq_state.ref_count,
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&messageq_state.ref_count)
+ != MESSAGEQ_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ messageq_get_config(&tmpcfg);
+ cfg = &tmpcfg;
+ }
+
+ if (WARN_ON(cfg->max_name_len == 0)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (cfg->name_table_gate != NULL) {
+ messageq_state.gate_handle = cfg->name_table_gate;
+ } else {
+ /* User has not provided any gate handle, so create a default
+ * handle for protecting list object */
+ messageq_state.gate_handle = kmalloc(sizeof(struct mutex),
+ GFP_KERNEL);
+		if (messageq_state.gate_handle == NULL) {
+			/*! @retval -ENOMEM Failed to create lock! */
+			printk(KERN_ERR "messageq_setup: Failed to create a "
+				"mutex.\n");
+			status = -ENOMEM;
+			goto exit;
+		}
+ mutex_init(messageq_state.gate_handle);
+ }
+
+ /* Initialize the parameters */
+ nameserver_params_init(&params);
+ params.max_value_len = 4;
+ params.max_name_len = cfg->max_name_len;
+
+ /* Create the nameserver for modules */
+ messageq_state.ns_handle = nameserver_create(MESSAGEQ_NAMESERVER,
+ &params);
+ if (messageq_state.ns_handle == NULL) {
+ /*! @retval MESSAGEQ_E_FAIL Failed to create the
+ * MessageQ nameserver*/
+ status = MESSAGEQ_E_FAIL;
+		printk(KERN_ERR "messageq_setup: Failed to create the messageq "
+			"nameserver!\n");
+ goto nameserver_create_fail;
+ }
+
+ messageq_state.num_heaps = cfg->num_heaps;
+ messageq_state.heaps = kzalloc(sizeof(void *) * \
+ messageq_state.num_heaps, GFP_KERNEL);
+ if (messageq_state.heaps == NULL) {
+ status = -ENOMEM;
+ goto heaps_alloc_fail;
+ }
+
+ messageq_state.num_queues = cfg->max_runtime_entries;
+ messageq_state.queues = kzalloc(sizeof(struct messageq_object *) * \
+ messageq_state.num_queues, GFP_KERNEL);
+ if (messageq_state.queues == NULL) {
+ status = -ENOMEM;
+ goto queues_alloc_fail;
+ }
+
+ memset(&(messageq_state.transports), 0, (sizeof(void *) * \
+ MULTIPROC_MAXPROCESSORS * \
+ MESSAGEQ_NUM_PRIORITY_QUEUES));
+
+ BUG_ON(status < 0);
+ return status;
+
+queues_alloc_fail:
+ if (messageq_state.queues != NULL)
+ kfree(messageq_state.queues);
+heaps_alloc_fail:
+ if (messageq_state.heaps != NULL)
+ kfree(messageq_state.heaps);
+ if (messageq_state.ns_handle != NULL)
+ nameserver_delete(&messageq_state.ns_handle);
+nameserver_create_fail:
+ if (cfg->name_table_gate != NULL) {
+ if (messageq_state.gate_handle != NULL) {
+ kfree(messageq_state.gate_handle);
+ messageq_state.gate_handle = NULL;
+ }
+ }
+
+ memset(&messageq_state.cfg, 0, sizeof(struct messageq_config));
+ messageq_state.queues = NULL;
+ messageq_state.heaps = NULL;
+ messageq_state.num_queues = 0;
+ messageq_state.num_heaps = 1;
+ messageq_state.can_free_queues = true;
+
+exit:
+ if (status < 0) {
+ atomic_set(&messageq_state.ref_count,
+ MESSAGEQ_MAKE_MAGICSTAMP(0));
+ printk(KERN_ERR "messageq_setup failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_setup);
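+
+/*
+ * Illustrative setup sequence (editor's sketch, not part of this patch),
+ * following the get-config/override/setup pattern described above. The value
+ * 4 for max_runtime_entries is an arbitrary example:
+ *
+ *	struct messageq_config cfg;
+ *	int status;
+ *
+ *	messageq_get_config(&cfg);
+ *	cfg.max_runtime_entries = 4;
+ *	status = messageq_setup(&cfg);
+ *	if (status < 0)
+ *		return status;
+ *	...
+ *	messageq_destroy();
+ */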
+
+/*
+ * ======== messageq_destroy ========
+ * Purpose:
+ * Function to destroy the MessageQ module.
+ */
+int messageq_destroy(void)
+{
+ int status = MESSAGEQ_SUCCESS;
+ u32 i;
+
+ if (atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&messageq_state.ref_count)
+ == MESSAGEQ_MAKE_MAGICSTAMP(0))) {
+ status = 1;
+ goto exit;
+ }
+
+ if (WARN_ON(messageq_state.ns_handle == NULL)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ /* Delete any Message Queues that have not been deleted so far. */
+ for (i = 0; i < messageq_state.num_queues; i++) {
+ if (messageq_state.queues[i] != NULL)
+ messageq_delete(&(messageq_state.queues[i]));
+ }
+
+ /* Delete the nameserver for modules */
+ status = nameserver_delete(&messageq_state.ns_handle);
+ BUG_ON(status < 0);
+
+ /* Delete the gate if created internally */
+ if (messageq_state.cfg.name_table_gate == NULL) {
+ kfree(messageq_state.gate_handle);
+ messageq_state.gate_handle = NULL;
+ BUG_ON(status < 0);
+ }
+
+ memset(&(messageq_state.transports), 0, (sizeof(void *) * \
+ MULTIPROC_MAXPROCESSORS * MESSAGEQ_NUM_PRIORITY_QUEUES));
+ if (messageq_state.heaps != NULL) {
+ kfree(messageq_state.heaps);
+ messageq_state.heaps = NULL;
+ }
+ if (messageq_state.queues != NULL) {
+ kfree(messageq_state.queues);
+ messageq_state.queues = NULL;
+ }
+
+ memset(&messageq_state.cfg, 0, sizeof(struct messageq_config));
+ messageq_state.num_queues = 0;
+ messageq_state.num_heaps = 1;
+ messageq_state.can_free_queues = true;
+ atomic_set(&messageq_state.ref_count,
+ MESSAGEQ_MAKE_MAGICSTAMP(0));
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_destroy failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_destroy);
+
+/*
+ * ======== messageq_params_init ========
+ * Purpose:
+ * Initialize this config-params structure with supplier-specified
+ * defaults before instance creation.
+ */
+void messageq_params_init(void *messageq_handle,
+ struct messageq_params *params)
+{
+ struct messageq_object *object = NULL;
+
+ if (atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)
+ goto exit;
+
+ if (WARN_ON(params == NULL)) {
+ printk(KERN_ERR "messageq_params_init failed:Argument of "
+ "type(messageq_params *) is NULL!\n");
+ goto exit;
+ }
+
+ if (messageq_handle == NULL) {
+ memcpy(params, &(messageq_state.default_inst_params),
+ sizeof(struct messageq_params));
+ } else {
+ object = (struct messageq_object *) messageq_handle;
+ memcpy((void *)params, (void *)&object->params,
+ sizeof(struct messageq_params));
+ }
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(messageq_params_init);
+
+/*
+ * ======== messageq_create ========
+ * Purpose:
+ * Creates a new instance of the MessageQ module.
+ */
+void *messageq_create(char *name, const struct messageq_params *params)
+{
+ int status = 0;
+ struct messageq_object *handle = NULL;
+ bool found = false;
+ u16 count = 0;
+ int i;
+ u16 start;
+ u16 queueIndex = 0;
+
+ if (atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(params == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Create the generic handle */
+ handle = kzalloc(sizeof(struct messageq_object), 0);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(messageq_state.gate_handle);
+ if (status)
+ goto exit;
+ start = 0; /* Statically allocated objects not supported */
+ count = messageq_state.num_queues;
+ /* Search the dynamic array for any holes */
+ for (i = start; i < count ; i++) {
+ if (messageq_state.queues[i] == NULL) {
+ messageq_state.queues[i] = (void *) handle;
+ queueIndex = i;
+ found = true;
+ break;
+ }
+ }
+ /*
+ * If no free slot was found:
+ * - if no growth allowed, raise an error
+ * - if growth is allowed, grow the array
+ */
+ if (found == false) {
+ if (messageq_state.cfg.max_runtime_entries
+ != MESSAGEQ_ALLOWGROWTH) {
+ mutex_unlock(messageq_state.gate_handle);
+ status = MESSAGEQ_E_MAXREACHED;
+ printk(KERN_ERR "messageq_create: All message queues "
+ "are full!\n");
+ goto free_slot_fail;
+ } else {
+ queueIndex = _messageq_grow(handle);
+ if (queueIndex == MESSAGEQ_INVALIDMESSAGEQ) {
+ mutex_unlock(messageq_state.gate_handle);
+ status = MESSAGEQ_E_MAXREACHED;
+ printk(KERN_ERR "messageq_create: All message "
+ "queues are full!\n");
+ goto free_slot_fail;
+ }
+ }
+ }
+
+ BUG_ON(status < 0);
+ mutex_unlock(messageq_state.gate_handle);
+
+ /* Construct the list object */
+ INIT_LIST_HEAD(&handle->normal_list);
+ INIT_LIST_HEAD(&handle->high_list);
+
+ /* Copy the name */
+ if (name != NULL) {
+ handle->name = kmalloc((strlen(name) + 1), GFP_KERNEL);
+ if (handle->name == NULL) {
+ status = -ENOMEM;
+ goto handle_name_alloc_fail;
+ }
+ strncpy(handle->name, name, strlen(name) + 1);
+ }
+
+ /* Update processor information */
+ handle->queue = ((u32)(multiproc_get_id(NULL)) << 16) | queueIndex;
+ /*handle->synchronizer = OsalSemaphore_create(OsalSemaphore_Type_Binary
+ | OsalSemaphore_IntType_Interruptible);*/
+ handle->synchronizer = kzalloc(sizeof(struct semaphore), GFP_KERNEL);
+ if (handle->synchronizer == NULL) {
+ status = MESSAGEQ_E_FAIL;
+ printk(KERN_ERR "messageq_create: Failed to create "
+ "synchronizer semaphore!\n");
+ goto semaphore_create_fail;
+ } else {
+ sema_init(handle->synchronizer, 0);
+ }
+
+ if (name != NULL) {
+ nameserver_add_uint32(messageq_state.ns_handle, name,
+ handle->queue);
+ }
+ goto exit;
+
+semaphore_create_fail:
+ if (handle->name != NULL)
+ kfree(handle->name);
+
+handle_name_alloc_fail:
+ list_del(&handle->high_list);
+ list_del(&handle->normal_list);
+
+free_slot_fail:
+ /* Now free the handle */
+ if (handle != NULL) {
+ kfree(handle);
+ handle = NULL;
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_create failed! status = 0x%x\n",
+ status);
+ }
+ return (void *) handle;
+}
+EXPORT_SYMBOL(messageq_create);
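+
+/*
+ * Editor's note on the queue id layout used above: the 32-bit queue id packs
+ * the owning processor id in the upper 16 bits and the local table index in
+ * the lower 16 bits, e.g. index 5 on processor 2 gives ((2 << 16) | 5) ==
+ * 0x00020005. messageq_put() later recovers the two halves with
+ * (u16)(queue_id >> 16) and (u16)(queue_id).
+ */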
+
+/*
+ * ======== messageq_delete ========
+ * Purpose:
+ * Deletes an instance of the MessageQ module.
+ */
+int messageq_delete(void **msg_handleptr)
+{
+ int status = 0;
+ struct messageq_object *handle = NULL;
+
+ if (atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(msg_handleptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(*msg_handleptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct messageq_object *) (*msg_handleptr);
+
+ /* Take the local lock */
+ status = mutex_lock_interruptible(messageq_state.gate_handle);
+ if (status)
+ goto exit;
+
+ if (handle->name != NULL) {
+		/* remove from the name server */
+ nameserver_remove(messageq_state.ns_handle, handle->name);
+ /* Free memory for the name */
+ kfree(handle->name);
+ }
+
+ /* Release the local lock */
+ mutex_unlock(messageq_state.gate_handle);
+
+ /* Free the list */
+ list_del(&handle->high_list);
+ list_del(&handle->normal_list);
+
+ /*if (handle->synchronizer != NULL)
+ status = OsalSemaphore_delete(&handle->synchronizer);*/
+ if (handle->synchronizer != NULL) {
+ kfree(handle->synchronizer);
+ handle->synchronizer = NULL;
+ }
+ /* Clear the MessageQ handle from array. */
+	messageq_state.queues[(u16)(handle->queue)] = NULL;
+
+ /* Now free the handle */
+ kfree(handle);
+ *msg_handleptr = NULL;
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_delete failed! status = 0x%x\n",
+			status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_delete);
+
+/*
+ * ======== messageq_open ========
+ * Purpose:
+ * Opens a created instance of the MessageQ module.
+ */
+int messageq_open(char *name, u32 *queue_id)
+{
+ int status = 0;
+ int len = 0;
+
+ if (atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(queue_id == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Initialize return queue ID to invalid. */
+ *queue_id = MESSAGEQ_INVALIDMESSAGEQ;
+ len = nameserver_get(messageq_state.ns_handle, name, queue_id,
+ sizeof(u32), NULL);
+ if (len < 0) {
+ if (len == -ENOENT) {
+ /* Name not found */
+ status = -ENOENT;
+ } else {
+ /* Any other error from nameserver */
+ status = len;
+ }
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_open failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_open);
+
+/*
+ * ======== messageq_close ========
+ * Purpose:
+ * Closes a previously opened/created instance of the MessageQ module.
+ */
+void messageq_close(u32 *queue_id)
+{
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(queue_id == NULL)) {
+ printk(KERN_ERR "messageq_close: queue_id passed is NULL!\n");
+ goto exit;
+ }
+
+ *queue_id = MESSAGEQ_INVALIDMESSAGEQ;
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(messageq_close);
+
+/*
+ * ======== messageq_get ========
+ */
+int messageq_get(void *messageq_handle, messageq_msg *msg,
+ u32 timeout)
+{
+ int status = 0;
+ struct messageq_object *obj = (struct messageq_object *)messageq_handle;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(msg == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (WARN_ON(obj == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+	*msg = NULL;
+
+	/* Keep looping while there is no element in the list */
+	/* Take the local lock */
+ status = mutex_lock_interruptible(messageq_state.gate_handle);
+ if (status)
+ goto exit;
+ if (!list_empty(&obj->high_list)) {
+ *msg = (messageq_msg) (obj->high_list.next);
+ list_del_init(obj->high_list.next);
+ }
+ /* Leave the local lock */
+ mutex_unlock(messageq_state.gate_handle);
+ while (*msg == NULL) {
+ status = mutex_lock_interruptible(messageq_state.gate_handle);
+ if (status)
+ goto exit;
+ if (!list_empty(&obj->normal_list)) {
+ *msg = (messageq_msg) (obj->normal_list.next);
+ list_del_init(obj->normal_list.next);
+ }
+ mutex_unlock(messageq_state.gate_handle);
+
+ if (*msg == NULL) {
+ /*
+ * Block until notified. If pend times-out, no message
+ * should be returned to the caller
+ */
+ /*! @retval NULL timeout has occurred */
+ if (obj->synchronizer != NULL) {
+ /* TODO: cater to different timeout values */
+ /*status = OsalSemaphore_pend(
+ obj->synchronizer, timeout); */
+ if (timeout == MESSAGEQ_FOREVER) {
+ if (down_interruptible
+ (obj->synchronizer)) {
+ status = -ERESTARTSYS;
+ }
+ } else {
+ status = down_timeout(obj->synchronizer,
+ msecs_to_jiffies(timeout));
+ }
+ if (status < 0) {
+ *msg = NULL;
+ break;
+ }
+ }
+ status = mutex_lock_interruptible(
+ messageq_state.gate_handle);
+ if (status)
+ goto exit;
+ if (!list_empty(&obj->high_list)) {
+ *msg = (messageq_msg) (obj->high_list.next);
+ list_del_init(obj->high_list.next);
+ }
+ mutex_unlock(messageq_state.gate_handle);
+ }
+ }
+ return status;
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "messageq_get failed! status = 0x%x\n", status);
+ return status;
+}
+EXPORT_SYMBOL(messageq_get);
+
+/*
+ * ======== messageq_count ========
+ * Purpose:
+ * Count the number of messages in the queue
+ */
+int messageq_count(void *messageq_handle)
+{
+ struct messageq_object *obj = (struct messageq_object *)messageq_handle;
+ int count = 0;
+ struct list_head *elem;
+ int key;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(obj == NULL)) {
+ printk(KERN_ERR "messageq_count: obj passed is NULL!\n");
+ goto exit;
+ }
+
+ key = mutex_lock_interruptible(messageq_state.gate_handle);
+ if (key < 0)
+ return key;
+
+ list_for_each(elem, &obj->high_list) {
+ count++;
+ }
+ list_for_each(elem, &obj->normal_list) {
+ count++;
+ }
+ mutex_unlock(messageq_state.gate_handle);
+
+exit:
+ return count;
+}
+EXPORT_SYMBOL(messageq_count);
+
+/*
+ * ======== messageq_static_msg_init ========
+ * Purpose:
+ * Initialize a static message
+ */
+void messageq_static_msg_init(messageq_msg msg, u32 size)
+{
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(msg == NULL)) {
+ printk(KERN_ERR "messageq_static_msg_init: msg is invalid!\n");
+ goto exit;
+ }
+
+ /* Fill in the fields of the message */
+ msg->heap_id = MESSAGEQ_STATICMSG;
+ msg->msg_size = size;
+ msg->reply_id = (u16)MESSAGEQ_INVALIDMESSAGEQ;
+ msg->msg_id = MESSAGEQ_INVALIDMSGID;
+ msg->dst_id = (u16)MESSAGEQ_INVALIDMESSAGEQ;
+ msg->flags = MESSAGEQ_HEADERVERSION | MESSAGEQ_NORMALPRI;
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(messageq_static_msg_init);
+
+/*
+ * ======== messageq_alloc ========
+ * Purpose:
+ * Allocate a message and initialize the needed fields (note that some
+ * of the fields in the header are set via other APIs or in the
+ * messageq_put function).
+ */
+messageq_msg messageq_alloc(u16 heap_id, u32 size)
+{
+ messageq_msg msg = NULL;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(heap_id >= messageq_state.num_heaps)) {
+ printk(KERN_ERR "messageq_alloc: heap_id is invalid!\n");
+ goto exit;
+ }
+
+ if (messageq_state.heaps[heap_id] != NULL) {
+ /* Allocate the message. No alignment requested */
+ msg = heap_alloc(messageq_state.heaps[heap_id], size, 0);
+ if (msg == NULL) {
+ printk(KERN_ERR "messageq_alloc: message allocation "
+ "failed!\n");
+ goto exit;
+ }
+
+ /* Fill in the fields of the message */
+ msg->msg_size = size;
+ msg->heap_id = heap_id;
+ msg->reply_id = (u16)MESSAGEQ_INVALIDMESSAGEQ;
+ msg->dst_id = (u16)MESSAGEQ_INVALIDMESSAGEQ;
+ msg->msg_id = MESSAGEQ_INVALIDMSGID;
+ msg->flags = MESSAGEQ_HEADERVERSION | MESSAGEQ_NORMALPRI;
+ } else {
+ printk(KERN_ERR "messageq_alloc: heap_id was not "
+ "registered!\n");
+ }
+
+exit:
+ return msg;
+}
+EXPORT_SYMBOL(messageq_alloc);
+
+/*
+ * ======== messageq_free ========
+ * Purpose:
+ * Frees the message.
+ */
+int messageq_free(messageq_msg msg)
+{
+	int status = 0;
+ void *heap = NULL;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(msg == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+	if (msg->heap_id == MESSAGEQ_STATICMSG) {
+		status = MESSAGEQ_E_CANNOTFREESTATICMSG;
+		goto exit;
+	}
+	if (msg->heap_id >= messageq_state.num_heaps) {
+		status = MESSAGEQ_E_INVALIDHEAPID;
+		goto exit;
+	}
+
+ heap = messageq_state.heaps[msg->heap_id];
+ heap_free(heap, msg, msg->msg_size);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_free failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_free);
+
+/*
+ * ======== messageq_put ========
+ * Purpose:
+ * Put a message in the queue
+ */
+int messageq_put(u32 queue_id, messageq_msg msg)
+{
+ int status = 0;
+ u16 dst_proc_id = (u16)(queue_id >> 16);
+ struct messageq_object *obj = NULL;
+ void *transport = NULL;
+ u32 priority;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(msg == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ msg->dst_id = (u16)(queue_id);
+ msg->dst_proc = (u16)(queue_id >> 16);
+ if (dst_proc_id != multiproc_get_id(NULL)) {
+ if (dst_proc_id >= multiproc_get_max_processors()) {
+ /* Invalid destination processor id */
+ status = MESSAGEQ_E_INVALIDPROCID;
+ goto exit;
+ }
+
+ priority = (u32)((msg->flags) & MESSAGEQ_TRANSPORTPRIORITYMASK);
+ /* Call the transport associated with this message queue */
+ transport = messageq_state.transports[dst_proc_id][priority];
+ if (transport == NULL) {
+ /* Try the other transport */
+ priority = !priority;
+ transport =
+ messageq_state.transports[dst_proc_id][priority];
+ }
+
+ if (transport != NULL)
+ status = messageq_transportshm_put(transport, msg);
+ else {
+ status = -ENODEV;
+ goto exit;
+ }
+ } else {
+ /* It is a local MessageQ */
+ obj = (struct messageq_object *)
+ (messageq_state.queues[(u16)(queue_id)]);
+ status = mutex_lock_interruptible(messageq_state.gate_handle);
+ if (status < 0)
+ goto exit;
+ if ((msg->flags & MESSAGEQ_PRIORITYMASK) == \
+ MESSAGEQ_URGENTPRI) {
+ list_add((struct list_head *) msg, &obj->high_list);
+ } else {
+ if ((msg->flags & MESSAGEQ_PRIORITYMASK) == \
+ MESSAGEQ_NORMALPRI) {
+ list_add_tail((struct list_head *) msg,
+ &obj->normal_list);
+ } else {
+ list_add_tail((struct list_head *) msg,
+ &obj->high_list);
+ }
+ }
+ mutex_unlock(messageq_state.gate_handle);
+
+ /* Notify the reader. */
+ if (obj->synchronizer != NULL) {
+ up(obj->synchronizer);
+ /*OsalSemaphore_post(obj->synchronizer);*/
+ }
+ }
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "messageq_put failed! status = 0x%x\n", status);
+ return status;
+}
+EXPORT_SYMBOL(messageq_put);
+
+/*
+ * ======== messageq_register_heap ========
+ * Purpose:
+ * register a heap
+ */
+int messageq_register_heap(void *heap_handle, u16 heap_id)
+{
+ int status = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+ /* Make sure the heap_id is valid */
+ if (WARN_ON(heap_id >= messageq_state.num_heaps)) {
+ /*! @retval MESSAGEQ_E_HEAPIDINVALID Invalid heap_id */
+ status = MESSAGEQ_E_HEAPIDINVALID;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(messageq_state.gate_handle);
+ if (status)
+ goto exit;
+ if (messageq_state.heaps[heap_id] == NULL)
+ messageq_state.heaps[heap_id] = heap_handle;
+ else {
+ /*! @retval MESSAGEQ_E_ALREADYEXISTS Specified heap is
+ already registered. */
+ status = MESSAGEQ_E_ALREADYEXISTS;
+ }
+ mutex_unlock(messageq_state.gate_handle);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_register_heap failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_register_heap);
+
+/*
+ * ======== messageq_unregister_heap ========
+ * Purpose:
+ * Unregister a heap
+ */
+int messageq_unregister_heap(u16 heap_id)
+{
+ int status = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ /* Make sure the heap_id is valid */
+	if (WARN_ON(heap_id >= messageq_state.num_heaps)) {
+ /*! @retval MESSAGEQ_E_HEAPIDINVALID Invalid heap_id */
+ status = MESSAGEQ_E_HEAPIDINVALID;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(messageq_state.gate_handle);
+ if (status)
+ goto exit;
+ if (messageq_state.heaps != NULL)
+ messageq_state.heaps[heap_id] = NULL;
+ mutex_unlock(messageq_state.gate_handle);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_unregister_heap failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_unregister_heap);
+
+/*
+ * ======== messageq_register_transport ========
+ * Purpose:
+ * register a transport
+ */
+int messageq_register_transport(void *messageq_transportshm_handle,
+ u16 proc_id, u32 priority)
+{
+ int status = 0;
+
+ BUG_ON(messageq_transportshm_handle == NULL);
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ /* Make sure the proc_id is valid */
+ if (WARN_ON(proc_id >= multiproc_get_max_processors())) {
+ /*! @retval MESSAGEQ_E_PROCIDINVALID Invalid proc_id */
+ status = MESSAGEQ_E_PROCIDINVALID;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(messageq_state.gate_handle);
+ if (status)
+ goto exit;
+ if (messageq_state.transports[proc_id][priority] == NULL) {
+ messageq_state.transports[proc_id][priority] = \
+ messageq_transportshm_handle;
+ } else {
+ /*! @retval MESSAGEQ_E_ALREADYEXISTS Specified transport is
+ already registered. */
+ status = MESSAGEQ_E_ALREADYEXISTS;
+ }
+ mutex_unlock(messageq_state.gate_handle);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_register_transport failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_register_transport);
+
+/*
+ * ======== messageq_unregister_transport ========
+ * Purpose:
+ * Unregister a transport
+ */
+int messageq_unregister_transport(u16 proc_id, u32 priority)
+{
+ int status = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ /* Make sure the proc_id is valid */
+ if (WARN_ON(proc_id >= multiproc_get_max_processors())) {
+ /*! @retval MESSAGEQ_E_PROCIDINVALID Invalid proc_id */
+ status = MESSAGEQ_E_PROCIDINVALID;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(messageq_state.gate_handle);
+ if (status)
+ goto exit;
+	if (messageq_state.transports[proc_id][priority] != NULL)
+		messageq_state.transports[proc_id][priority] = NULL;
+ mutex_unlock(messageq_state.gate_handle);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_unregister_transport failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_unregister_transport);
+
+/*
+ * ======== messageq_set_reply_queue ========
+ * Purpose:
+ * Set the reply queue of the message.
+ */
+void messageq_set_reply_queue(void *messageq_handle, messageq_msg msg)
+{
+ struct messageq_object *obj = \
+ (struct messageq_object *) messageq_handle;
+
+ BUG_ON(messageq_handle == NULL);
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(msg == NULL)) {
+ printk(KERN_ERR "messageq_set_reply_queue: msg passed is "
+ "NULL!\n");
+ goto exit;
+ }
+
+ msg->reply_id = (u16)(obj->queue);
+ msg->reply_proc = (u16)(obj->queue >> 16);
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(messageq_set_reply_queue);
+
+/*
+ * ======== messageq_get_queue_id ========
+ * Purpose:
+ * Get the queue id of the MessageQ handle.
+ */
+u32 messageq_get_queue_id(void *messageq_handle)
+{
+ struct messageq_object *obj = \
+ (struct messageq_object *) messageq_handle;
+ u32 queue_id = MESSAGEQ_INVALIDMESSAGEQ;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(obj == NULL)) {
+ printk(KERN_ERR "messageq_get_queue_id: obj passed is NULL!\n");
+ goto exit;
+ }
+
+ queue_id = (obj->queue);
+
+exit:
+ return queue_id;
+}
+EXPORT_SYMBOL(messageq_get_queue_id);
+
+/*
+ * ======== messageq_get_proc_id ========
+ * Purpose:
+ * Get the processor id of the MessageQ handle.
+ */
+u16 messageq_get_proc_id(void *messageq_handle)
+{
+ struct messageq_object *obj = \
+ (struct messageq_object *) messageq_handle;
+ u16 proc_id = MULTIPROC_INVALIDID;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(obj == NULL)) {
+ printk(KERN_ERR "messageq_get_proc_id: obj passed is NULL!\n");
+ goto exit;
+ }
+
+ proc_id = (u16)(obj->queue >> 16);
+
+exit:
+ return proc_id;
+}
+EXPORT_SYMBOL(messageq_get_proc_id);
+
+/*
+ * ======== messageq_get_dst_queue ========
+ * Purpose:
+ * Get the destination queue of the message.
+ */
+u32 messageq_get_dst_queue(messageq_msg msg)
+{
+ u32 queue_id = MESSAGEQ_INVALIDMESSAGEQ;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(msg == NULL)) {
+ printk(KERN_ERR "messageq_get_dst_queue: msg passed is "
+ "NULL!\n");
+ goto exit;
+ }
+
+ /*construct queue value */
+ if (msg->dst_id != (u32)MESSAGEQ_INVALIDMESSAGEQ)
+ queue_id = ((u32) multiproc_get_id(NULL) << 16) | msg->dst_id;
+
+exit:
+ return queue_id;
+}
+EXPORT_SYMBOL(messageq_get_dst_queue);
+
+/*
+ * ======== messageq_get_msg_id ========
+ * Purpose:
+ * Get the message id of the message.
+ */
+u16 messageq_get_msg_id(messageq_msg msg)
+{
+ u16 id = MESSAGEQ_INVALIDMSGID;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(msg == NULL)) {
+ printk(KERN_ERR "messageq_get_msg_id: msg passed is NULL!\n");
+ goto exit;
+ }
+
+ id = msg->msg_id;
+
+exit:
+ return id;
+}
+EXPORT_SYMBOL(messageq_get_msg_id);
+
+/*
+ * ======== messageq_get_msg_size ========
+ * Purpose:
+ * Get the message size of the message.
+ */
+u32 messageq_get_msg_size(messageq_msg msg)
+{
+ u32 size = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(msg == NULL)) {
+ printk(KERN_ERR "messageq_get_msg_size: msg passed is NULL!\n");
+ goto exit;
+ }
+
+ size = msg->msg_size;
+
+exit:
+ return size;
+}
+EXPORT_SYMBOL(messageq_get_msg_size);
+
+/*
+ * ======== messageq_get_msg_pri ========
+ * Purpose:
+ * Get the message priority of the message.
+ */
+u32 messageq_get_msg_pri(messageq_msg msg)
+{
+ u32 priority = MESSAGEQ_NORMALPRI;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(msg == NULL)) {
+ printk(KERN_ERR "messageq_get_msg_pri: msg passed is NULL!\n");
+ goto exit;
+ }
+
+ priority = ((u32)(msg->flags & MESSAGEQ_PRIORITYMASK));
+
+exit:
+ return priority;
+}
+EXPORT_SYMBOL(messageq_get_msg_pri);
+
+/*
+ * ======== messageq_get_reply_queue ========
+ * Purpose:
+ * Get the embedded source message queue out of the message.
+ */
+u32 messageq_get_reply_queue(messageq_msg msg)
+{
+ u32 queue = MESSAGEQ_INVALIDMESSAGEQ;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(msg == NULL)) {
+ printk(KERN_ERR "messageq_get_reply_queue: msg passed is "
+ "NULL!\n");
+ goto exit;
+ }
+
+ if (msg->reply_id != (u16)MESSAGEQ_INVALIDMESSAGEQ)
+ queue = ((u32)(msg->reply_proc) << 16) | msg->reply_id;
+
+exit:
+ return queue;
+}
+EXPORT_SYMBOL(messageq_get_reply_queue);
+
+/*
+ * ======== messageq_set_msg_id ========
+ * Purpose:
+ * Set the message id of the message.
+ */
+void messageq_set_msg_id(messageq_msg msg, u16 msg_id)
+{
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(msg == NULL)) {
+ printk(KERN_ERR "messageq_set_msg_id: msg passed is NULL!\n");
+ goto exit;
+ }
+
+ msg->msg_id = msg_id;
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(messageq_set_msg_id);
+
+/*
+ * ======== messageq_set_msg_pri ========
+ * Purpose:
+ * Set the priority of the message.
+ */
+void messageq_set_msg_pri(messageq_msg msg, u32 priority)
+{
+ if (WARN_ON(atomic_cmpmask_and_lt(&(messageq_state.ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(msg == NULL)) {
+ printk(KERN_ERR "messageq_set_msg_pri: msg passed is NULL!\n");
+ goto exit;
+ }
+
+	msg->flags &= ~MESSAGEQ_PRIORITYMASK;
+	msg->flags |= (priority & MESSAGEQ_PRIORITYMASK);
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(messageq_set_msg_pri);
+
+/* =============================================================================
+ * Internal functions
+ * =============================================================================
+ */
+/*
+ * ======== _messageq_grow ========
+ * Purpose:
+ * Grow the MessageQ table
+ */
+static u16 _messageq_grow(struct messageq_object *obj)
+{
+ u16 queue_index = messageq_state.num_queues;
+ int oldSize;
+ void **queues;
+ void **oldqueues;
+
+	if (WARN_ON(obj == NULL)) {
+		printk(KERN_ERR "_messageq_grow: obj passed is NULL!\n");
+		queue_index = MESSAGEQ_INVALIDMESSAGEQ;
+		goto exit;
+	}
+
+ oldSize = (messageq_state.num_queues) * \
+ sizeof(struct messageq_object *);
+ queues = kmalloc(oldSize + sizeof(struct messageq_object *),
+ GFP_KERNEL);
+	if (queues == NULL) {
+		printk(KERN_ERR "_messageq_grow: Growing the messageq "
+			"failed!\n");
+		queue_index = MESSAGEQ_INVALIDMESSAGEQ;
+		goto exit;
+	}
+
+ /* Copy contents into new table */
+ memcpy(queues, messageq_state.queues, oldSize);
+ /* Fill in the new entry */
+ queues[queue_index] = (void *)obj;
+ /* Hook-up new table */
+ oldqueues = messageq_state.queues;
+ messageq_state.queues = queues;
+ messageq_state.num_queues++;
+
+	/* Delete old table if not statically defined */
+ if (messageq_state.can_free_queues == true)
+ kfree(oldqueues);
+ else
+ messageq_state.can_free_queues = true;
+
+exit:
+ return queue_index;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/messageq_ioctl.c b/drivers/dsp/syslink/multicore_ipc/messageq_ioctl.c
new file mode 100644
index 000000000000..12283479b5b3
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/messageq_ioctl.c
@@ -0,0 +1,490 @@
+/*
+ * messageq_ioctl.c
+ *
+ * This file implements all the ioctl operations required on the messageq
+ * module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Linux headers */
+#include <linux/uaccess.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+/* Module Headers */
+#include <messageq.h>
+#include <messageq_ioctl.h>
+#include <sharedregion.h>
+
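+/*
+ * Illustrative user-space call sequence (editor's sketch, not part of this
+ * patch). The device node path "/dev/syslinkipc_MessageQ" is a hypothetical
+ * placeholder; the actual node name is defined by the driver registration
+ * code elsewhere.
+ *
+ *	struct messageq_cmd_args cargs;
+ *	int fd = open("/dev/syslinkipc_MessageQ", O_RDWR);
+ *
+ *	memset(&cargs, 0, sizeof(cargs));
+ *	cargs.args.put.queue_id = queue_id;
+ *	cargs.args.put.msg_srptr = msg_srptr;
+ *	if (ioctl(fd, CMD_MESSAGEQ_PUT, &cargs) == 0 && cargs.api_status == 0)
+ *		;	// message handed to the kernel-side messageq_put()
+ */
+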
+/*
+ * ======== messageq_ioctl_put ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_put function
+ */
+static inline int messageq_ioctl_put(struct messageq_cmd_args *cargs)
+{
+ int status = 0;
+ messageq_msg msg;
+
+ msg = (messageq_msg) sharedregion_get_ptr(cargs->args.put.msg_srptr);
+ if (unlikely(msg == NULL))
+ goto exit;
+
+ status = messageq_put(cargs->args.put.queue_id, msg);
+
+ cargs->api_status = status;
+exit:
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_get ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_get function
+ */
+static inline int messageq_ioctl_get(struct messageq_cmd_args *cargs)
+{
+ messageq_msg msg = NULL;
+ u32 *msg_srptr = SHAREDREGION_INVALIDSRPTR;
+ int index;
+
+ cargs->api_status = messageq_get(cargs->args.get.messageq_handle,
+ &msg,
+ cargs->args.get.timeout);
+ if (unlikely(cargs->api_status < 0))
+ goto exit;
+
+ index = sharedregion_get_index(msg);
+ if (unlikely(index < 0)) {
+ cargs->api_status = index;
+ goto exit;
+ }
+
+ msg_srptr = sharedregion_get_srptr(msg, index);
+
+exit:
+ cargs->args.get.msg_srptr = msg_srptr;
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_count ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_count function
+ */
+static inline int messageq_ioctl_count(struct messageq_cmd_args *cargs)
+{
+	int result = messageq_count(cargs->args.count.messageq_handle);
+	if (result < 0) {
+		cargs->api_status = result;
+	} else {
+		cargs->args.count.count = result;
+		cargs->api_status = 0;
+	}
+
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_alloc ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_alloc function
+ */
+static inline int messageq_ioctl_alloc(struct messageq_cmd_args *cargs)
+{
+ messageq_msg msg;
+ u32 *msg_srptr = SHAREDREGION_INVALIDSRPTR;
+ int index;
+
+ msg = messageq_alloc(cargs->args.alloc.heap_id, cargs->args.alloc.size);
+ if (unlikely(msg == NULL))
+ goto exit;
+
+ index = sharedregion_get_index(msg);
+ if (unlikely(index < 0))
+ goto exit;
+
+ msg_srptr = sharedregion_get_srptr(msg, index);
+
+ cargs->api_status = 0;
+exit:
+ cargs->args.alloc.msg_srptr = msg_srptr;
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_free ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_free function
+ */
+static inline int messageq_ioctl_free(struct messageq_cmd_args *cargs)
+{
+ int status = 0;
+ messageq_msg msg;
+
+ msg = sharedregion_get_ptr(cargs->args.free.msg_srptr);
+ if (unlikely(msg == NULL))
+ goto exit;
+ status = messageq_free(msg);
+
+ cargs->api_status = status;
+exit:
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_params_init ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_params_init function
+ */
+static inline int messageq_ioctl_params_init(struct messageq_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ struct messageq_params params;
+
+ messageq_params_init(cargs->args.params_init.messageq_handle,
+ &params);
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct messageq_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = status;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_ioctl_create ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_create function
+ */
+static inline int messageq_ioctl_create(struct messageq_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ struct messageq_params params;
+ char *name = NULL;
+
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct messageq_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ /* Allocate memory for the name */
+ if (cargs->args.create.name_len > 0) {
+ name = kmalloc(cargs->args.create.name_len, GFP_KERNEL);
+ if (name == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ size = copy_from_user(name, cargs->args.create.name,
+ cargs->args.create.name_len);
+ if (size) {
+ retval = -EFAULT;
+ goto free_name;
+ }
+ }
+
+ cargs->args.create.messageq_handle = messageq_create(name, &params);
+ if (cargs->args.create.messageq_handle != NULL) {
+ cargs->args.create.queue_id = messageq_get_queue_id(
+ cargs->args.create.messageq_handle);
+ }
+
+free_name:
+ if (cargs->args.create.name_len > 0)
+ kfree(name);
+
+ cargs->api_status = status;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_ioctl_delete ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_delete function
+ */
+static inline int messageq_ioctl_delete(struct messageq_cmd_args *cargs)
+{
+ cargs->api_status =
+ messageq_delete(&(cargs->args.delete_messageq.messageq_handle));
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_open ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_open function
+ */
+static inline int messageq_ioctl_open(struct messageq_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ char *name = NULL;
+ u32 queue_id = MESSAGEQ_INVALIDMESSAGEQ;
+
+ /* Allocate memory for the name */
+ if (cargs->args.open.name_len > 0) {
+ name = kmalloc(cargs->args.open.name_len, GFP_KERNEL);
+ if (name == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ size = copy_from_user(name, cargs->args.open.name,
+ cargs->args.open.name_len);
+ if (size) {
+ retval = -EFAULT;
+ goto free_name;
+ }
+ }
+
+ status = messageq_open(name, &queue_id);
+ cargs->args.open.queue_id = queue_id;
+
+free_name:
+ if (cargs->args.open.name_len > 0)
+ kfree(name);
+
+ cargs->api_status = status;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_ioctl_close ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_close function
+ */
+static inline int messageq_ioctl_close(struct messageq_cmd_args *cargs)
+{
+ u32 queue_id = cargs->args.close.queue_id;
+ messageq_close(&queue_id);
+ cargs->args.close.queue_id = queue_id;
+
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_get_config ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_get_config function
+ */
+static inline int messageq_ioctl_get_config(struct messageq_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct messageq_config config;
+
+ messageq_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct messageq_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_ioctl_setup ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_setup function
+ */
+static inline int messageq_ioctl_setup(struct messageq_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct messageq_config config;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct messageq_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = messageq_setup(&config);
+
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_ioctl_destroy ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_destroy function
+ */
+static inline int messageq_ioctl_destroy(struct messageq_cmd_args *cargs)
+{
+ cargs->api_status = messageq_destroy();
+ return 0;
+}
+
+
+/*
+ * ======== messageq_ioctl_register_heap ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_register_heap function
+ */
+static inline int messageq_ioctl_register_heap(struct messageq_cmd_args *cargs)
+{
+ cargs->api_status = \
+ messageq_register_heap(cargs->args.register_heap.heap_handle,
+ cargs->args.register_heap.heap_id);
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_unregister_heap ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_unregister_heap function
+ */
+static inline int messageq_ioctl_unregister_heap(
+ struct messageq_cmd_args *cargs)
+{
+ cargs->api_status = messageq_unregister_heap(
+ cargs->args.unregister_heap.heap_id);
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl ========
+ * Purpose:
+ * ioctl interface function for the messageq module
+ */
+int messageq_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int os_status = 0;
+ struct messageq_cmd_args __user *uarg =
+ (struct messageq_cmd_args __user *)args;
+ struct messageq_cmd_args cargs;
+ unsigned long size;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ os_status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ os_status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+ if (os_status) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg, sizeof(struct messageq_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_MESSAGEQ_PUT:
+ os_status = messageq_ioctl_put(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_GET:
+ os_status = messageq_ioctl_get(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_COUNT:
+ os_status = messageq_ioctl_count(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_ALLOC:
+ os_status = messageq_ioctl_alloc(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_FREE:
+ os_status = messageq_ioctl_free(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_PARAMS_INIT:
+ os_status = messageq_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_CREATE:
+ os_status = messageq_ioctl_create(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_DELETE:
+ os_status = messageq_ioctl_delete(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_OPEN:
+ os_status = messageq_ioctl_open(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_CLOSE:
+ os_status = messageq_ioctl_close(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_GETCONFIG:
+ os_status = messageq_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_SETUP:
+ os_status = messageq_ioctl_setup(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_DESTROY:
+ os_status = messageq_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_REGISTERHEAP:
+ os_status = messageq_ioctl_register_heap(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_UNREGISTERHEAP:
+ os_status = messageq_ioctl_unregister_heap(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ os_status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ os_status = -ERESTARTSYS;
+
+ if (os_status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct messageq_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+ return os_status;
+
+exit:
+ printk(KERN_ERR "messageq_ioctl failed: status = 0x%x\n", os_status);
+ return os_status;
+}
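+
+/*
+ * Illustrative user-space call sequence for this ioctl interface (sketch
+ * only; fd, heap_handle, heap_id and status are placeholders, while
+ * struct messageq_cmd_args and the CMD_MESSAGEQ_* commands come from this
+ * module):
+ *
+ *   struct messageq_cmd_args cargs;
+ *
+ *   memset(&cargs, 0, sizeof(cargs));
+ *   cargs.args.register_heap.heap_handle = heap_handle;
+ *   cargs.args.register_heap.heap_id = heap_id;
+ *   if (ioctl(fd, CMD_MESSAGEQ_REGISTERHEAP, &cargs) == 0)
+ *           status = cargs.api_status;
+ */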
diff --git a/drivers/dsp/syslink/multicore_ipc/messageq_transportshm.c b/drivers/dsp/syslink/multicore_ipc/messageq_transportshm.c
new file mode 100644
index 000000000000..d07ca3479466
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/messageq_transportshm.c
@@ -0,0 +1,778 @@
+/*
+ * messageq_transportshm.c
+ *
+ * MessageQ Transport module
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Utilities headers */
+#include <linux/string.h>
+#include <linux/slab.h>
+
+/* Syslink headers */
+#include <syslink/atomic_linux.h>
+/* Module level headers */
+#include <multiproc.h>
+#include <nameserver.h>
+#include <gatepeterson.h>
+#include <notify.h>
+#include <messageq.h>
+#include <listmp_sharedmemory.h>
+#include <messageq_transportshm.h>
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+/* Cache line size */
+#define MESSAGEQ_TRANSPORTSHM_CACHESIZE 128
+
+/* Indicates that the transport is up. */
+#define MESSAGEQ_TRANSPORTSHM_UP 0xBADC0FFE
+
+/* messageq_transportshm Version. */
+#define MESSAGEQ_TRANSPORTSHM_VERSION 1
+
+/*!
+ * @brief Macro to make a correct module magic number with refCount
+ */
+#define MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(x) \
+ ((MESSAGEQ_TRANSPORTSHM_MODULEID << 12u) | (x))
+
+/* =============================================================================
+ * Structures & Enums
+ * =============================================================================
+ */
+/*
+ * Defines the messageq_transportshm state object, which contains all the
+ * module specific information.
+ */
+struct messageq_transportshm_moduleobject {
+ atomic_t ref_count;
+ struct messageq_transportshm_config cfg;
+ /*< messageq_transportshm configuration structure */
+ struct messageq_transportshm_config def_cfg;
+ /*< Default module configuration */
+ struct messageq_transportshm_params def_inst_params;
+ /*< Default instance parameters */
+ void *gate_handle;
+ /*< Handle to the gate for local thread safety */
+ void *transports[MULTIPROC_MAXPROCESSORS][MESSAGEQ_NUM_PRIORITY_QUEUES];
+ /*!< Transport to be set in messageq_register_transport */
+
+};
+
+/*
+ * Structure of attributes in shared memory
+ */
+struct messageq_transportshm_attrs {
+ VOLATILE u32 version;
+ VOLATILE u32 flag;
+};
+
+/*
+ * Structure defining config parameters for the MessageQ transport
+ * instances.
+ */
+struct messageq_transportshm_object {
+ VOLATILE struct messageq_transportshm_attrs *attrs[2];
+ /* Attributes for both processors */
+ void *my_listmp_handle;
+ /* List for this processor */
+ void *remote_listmp_handle;
+ /* List for remote processor */
+ VOLATILE int status;
+ /* Current status */
+ int my_index;
+ /* 0 | 1 */
+ int remote_index;
+ /* 1 | 0 */
+ int notify_event_no;
+ /* Notify event to be used */
+ void *notify_driver;
+ /* Notify driver to be used */
+ u16 proc_id;
+ /* Dest proc id */
+ void *gate;
+ /* Gate for critical regions */
+ struct messageq_transportshm_params params;
+ /* Instance specific parameters */
+ u32 priority;
+ /*!< Priority of messages supported by this transport */
+};
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+/*
+ * @var messageq_transportshm_state
+ *
+ * messageq_transportshm state object variable
+ */
+static struct messageq_transportshm_moduleobject messageq_transportshm_state = {
+ .gate_handle = NULL,
+ .def_cfg.err_fxn = 0,
+ .def_inst_params.gate = NULL,
+ .def_inst_params.shared_addr = 0x0,
+ .def_inst_params.shared_addr_size = 0x0,
+ .def_inst_params.notify_event_no = (u32)(-1),
+ .def_inst_params.notify_driver = NULL,
+ .def_inst_params.priority = MESSAGEQ_NORMALPRI
+};
+
+
+/* =============================================================================
+ * Forward declarations of internal functions
+ * =============================================================================
+ */
+/* Callback function registered with the Notify module. */
+static void _messageq_transportshm_notify_fxn(u16 proc_id,
+ u32 event_no, void *arg, u32 payload);
+
+/* =============================================================================
+ * APIs called directly by applications
+ * =============================================================================
+ */
+/*
+ * ======== messageq_transportshm_get_config ========
+ * Purpose:
+ * Get the default configuration for the messageq_transportshm
+ * module.
+ *
+ * An application can call this function to have its configuration
+ * structure for messageq_transportshm_setup filled in with the default
+ * parameters by the messageq_transportshm module. If the user does not
+ * wish to change the default parameters, calling this API is optional.
+ */
+void messageq_transportshm_get_config(
+ struct messageq_transportshm_config *cfg)
+{
+ if (WARN_ON(cfg == NULL))
+ goto exit;
+
+ if (atomic_cmpmask_and_lt(&(messageq_transportshm_state.ref_count),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true) {
+ memcpy(cfg, &(messageq_transportshm_state.def_cfg),
+ sizeof(struct messageq_transportshm_config));
+ } else {
+ memcpy(cfg, &(messageq_transportshm_state.cfg),
+ sizeof(struct messageq_transportshm_config));
+ }
+ return;
+
+exit:
+ printk(KERN_ERR "messageq_transportshm_get_config: Argument of type "
+ "(struct messageq_transportshm_config *) passed is null!\n");
+}
+
+
+/*
+ * ======== messageq_transportshm_setup ========
+ * Purpose:
+ * Setup the messageq_transportshm module.
+ *
+ * This function sets up the messageq_transportshm module. This
+ * function must be called before any other instance-level APIs can
+ * be invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, messageq_transportshm_get_config can be called
+ * to get the configuration filled with the default values. After
+ * this, only the required configuration values need to be changed. If
+ * the user does not wish to make any change in the default
+ * parameters, the application can simply call
+ * messageq_transportshm_setup with a NULL parameter; the default
+ * parameters are then used automatically.
+ */
+int messageq_transportshm_setup(const struct messageq_transportshm_config *cfg)
+{
+ int status = MESSAGEQ_TRANSPORTSHM_SUCCESS;
+ struct messageq_transportshm_config tmp_cfg;
+
+ /* This sets the ref_count variable if it is not initialized; the upper
+ * 16 bits are written with the module id to ensure correctness of the
+ * ref_count variable.
+ */
+ atomic_cmpmask_and_set(&messageq_transportshm_state.ref_count,
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&messageq_transportshm_state.ref_count)
+ != MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(1u)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ messageq_transportshm_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ messageq_transportshm_state.gate_handle = \
+ kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (messageq_transportshm_state.gate_handle == NULL) {
+ /* @retval MESSAGEQTRANSPORTSHM_E_FAIL Failed to create
+ GateMutex! */
+ status = MESSAGEQ_TRANSPORTSHM_E_FAIL;
+ printk(KERN_ERR "messageq_transportshm_setup: Failed to create "
+ "mutex!\n");
+ atomic_set(&messageq_transportshm_state.ref_count,
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0));
+ goto exit;
+ }
+ mutex_init(messageq_transportshm_state.gate_handle);
+
+ /* Copy the user provided values into the state object. */
+ memcpy(&messageq_transportshm_state.cfg, cfg,
+ sizeof(struct messageq_transportshm_config));
+ memset(&(messageq_transportshm_state.transports), 0, (sizeof(void *) * \
+ MULTIPROC_MAXPROCESSORS * MESSAGEQ_NUM_PRIORITY_QUEUES));
+ return status;
+
+exit:
+ printk(KERN_ERR "messageq_transportshm_setup failed: status = 0x%x",
+ status);
+ return status;
+}
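+
+/*
+ * Typical setup sequence (illustrative sketch only; error handling
+ * omitted, all names below are from this module):
+ *
+ *   struct messageq_transportshm_config cfg;
+ *
+ *   messageq_transportshm_get_config(&cfg);
+ *   (optionally override selected fields of cfg here)
+ *   messageq_transportshm_setup(&cfg);
+ *
+ * Calling messageq_transportshm_setup(NULL) uses the defaults directly.
+ */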
+
+
+/*
+ * ======== messageq_transportshm_destroy ========
+ * Purpose:
+ * Destroy the messageq_transportshm module.
+ *
+ * Once this function is called, other messageq_transportshm module
+ * APIs, except for the messageq_transportshm_get_config API, cannot
+ * be called anymore.
+ */
+int messageq_transportshm_destroy(void)
+{
+ int status = 0;
+ u16 i;
+ u16 j;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(messageq_transportshm_state.ref_count),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&messageq_transportshm_state.ref_count)
+ == MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0))) {
+ status = 1;
+ goto exit;
+ }
+
+ /* Temporarily increment ref_count here. */
+ atomic_set(&messageq_transportshm_state.ref_count,
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(1));
+
+ /* Delete any Transports that have not been deleted so far. */
+ for (i = 0; i < MULTIPROC_MAXPROCESSORS; i++) {
+ for (j = 0 ; j < MESSAGEQ_NUM_PRIORITY_QUEUES; j++) {
+ if (messageq_transportshm_state.transports[i][j] != \
+ NULL) {
+ messageq_transportshm_delete(&
+ (messageq_transportshm_state.transports[i][j]));
+ }
+ }
+ }
+ if (messageq_transportshm_state.gate_handle != NULL) {
+ kfree(messageq_transportshm_state.gate_handle);
+ messageq_transportshm_state.gate_handle = NULL;
+ }
+
+ /* Decrease the ref_count */
+ atomic_set(&messageq_transportshm_state.ref_count,
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0));
+ return 0;
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_transportshm_destroy failed: "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+
+
+/*
+ * ======== messageq_transportshm_params_init ========
+ * Purpose:
+ * Get Instance parameters
+ */
+void messageq_transportshm_params_init(void *mqtshm_handle,
+ struct messageq_transportshm_params *params)
+{
+ struct messageq_transportshm_object *object = NULL;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(messageq_transportshm_state.ref_count),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ printk(KERN_ERR "messageq_transportshm_params_init: Module was "
+ "not initialized\n");
+ goto exit;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ printk(KERN_ERR "messageq_transportshm_params_init: Argument of"
+ " type (struct messageq_transportshm_params *) "
+ "is NULL!\n");
+ goto exit;
+ }
+
+ if (mqtshm_handle == NULL) {
+ memcpy(params, &(messageq_transportshm_state.def_inst_params),
+ sizeof(struct messageq_transportshm_params));
+ } else {
+ /* Return updated messageq_transportshm instance
+ specific parameters. */
+ object = (struct messageq_transportshm_object *) mqtshm_handle;
+ memcpy(params, &(object->params),
+ sizeof(struct messageq_transportshm_params));
+ }
+
+exit:
+ return;
+}
+
+/*
+ * ======== messageq_transportshm_create ========
+ * Purpose:
+ * Create a transport instance. This function waits for the remote
+ * processor to complete its transport creation. Hence it must be
+ * called only after the remote processor is running.
+ */
+void *messageq_transportshm_create(u16 proc_id,
+ const struct messageq_transportshm_params *params)
+{
+ struct messageq_transportshm_object *handle = NULL;
+ int status = 0;
+ int my_index;
+ int remote_index;
+ listmp_sharedmemory_params listmp_params[2];
+ VOLATILE u32 *otherflag;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(messageq_transportshm_state.ref_count),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(params->shared_addr_size < \
+ messageq_transportshm_shared_mem_req(params))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (messageq_transportshm_state.transports[proc_id][params->priority] \
+ != NULL) {
+ /* Specified transport is already registered. */
+ status = MESSAGEQ_E_ALREADYEXISTS;
+ goto exit;
+ }
+
+ /*
+ * Determine who gets the '0' slot and who gets the '1' slot
+ * The '0' slot is given to the lower multiproc id.
+ */
+ if (multiproc_get_id(NULL) < proc_id) {
+ my_index = 0;
+ remote_index = 1;
+ } else {
+ my_index = 1;
+ remote_index = 0;
+ }
+
+ handle = kzalloc(sizeof(struct messageq_transportshm_object),
+ GFP_KERNEL);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ handle->attrs[0] = (struct messageq_transportshm_attrs *)
+ params->shared_addr;
+ handle->attrs[1] = (struct messageq_transportshm_attrs *)
+ ((u32)(handle->attrs[0]) + \
+ MESSAGEQ_TRANSPORTSHM_CACHESIZE);
+ handle->status = messageq_transportshm_status_INIT;
+ handle->gate = params->gate;
+ memcpy(&(handle->params), (void *)params,
+ sizeof(struct messageq_transportshm_params));
+
+ status = notify_register_event(params->notify_driver, proc_id,
+ params->notify_event_no,
+ _messageq_transportshm_notify_fxn,
+ (void *)handle);
+ if (status < 0) {
+ /* @retval NULL Notify register failed */
+ printk(KERN_ERR "messageq_transportshm_create: "
+ "notify_register_event failed!\n");
+ goto notify_register_fail;
+ }
+
+ handle->notify_driver = params->notify_driver;
+ handle->notify_event_no = params->notify_event_no;
+ handle->priority = params->priority;
+ handle->proc_id = proc_id;
+ handle->my_index = my_index;
+ handle->remote_index = remote_index;
+
+ /* Create the shared lists for the transport. */
+ listmp_sharedmemory_params_init(NULL, &(listmp_params[0]));
+ listmp_params[0].shared_addr = (u32 *)((u32)(params->shared_addr) + \
+ (2 * MESSAGEQ_TRANSPORTSHM_CACHESIZE));
+ listmp_params[0].shared_addr_size = \
+ listmp_sharedmemory_shared_memreq(&(listmp_params[0]));
+ listmp_params[0].gate = params->gate;
+ listmp_params[0].name = NULL;
+ listmp_params[0].list_type = listmp_type_SHARED;
+
+ listmp_sharedmemory_params_init(NULL, &(listmp_params[1]));
+ listmp_params[1].shared_addr = \
+ (u32 *)((u32)(listmp_params[0].shared_addr) + \
+ listmp_params[0].shared_addr_size);
+ listmp_params[1].shared_addr_size = \
+ listmp_sharedmemory_shared_memreq(&(listmp_params[1]));
+ listmp_params[1].name = NULL;
+ listmp_params[1].list_type = listmp_type_SHARED;
+ listmp_params[1].gate = params->gate;
+
+ handle->my_listmp_handle = listmp_sharedmemory_create
+ (&(listmp_params[my_index]));
+ handle->attrs[my_index]->version = MESSAGEQ_TRANSPORTSHM_VERSION;
+ handle->attrs[my_index]->flag = MESSAGEQ_TRANSPORTSHM_UP;
+
+ /* Store in VOLATILE to make sure it is not compiled out... */
+ otherflag = &(handle->attrs[remote_index]->flag);
+
+ /* Loop until the other side is up */
+ while (*otherflag != MESSAGEQ_TRANSPORTSHM_UP)
+ ;
+
+ if (handle->attrs[remote_index]->version
+ != MESSAGEQ_TRANSPORTSHM_VERSION) {
+ /* @retval NULL Versions do not match */
+ printk(KERN_ERR "messageq_transportshm_create: "
+ "Incorrect version of remote transport!\n");
+ goto exit;
+ }
+
+ status = listmp_sharedmemory_open
+ ((listmp_sharedmemory_handle *) &(handle->remote_listmp_handle),
+ &listmp_params[remote_index]);
+ if (status < 0) {
+ /* @retval NULL List creation failed */
+ goto listmp_open_fail;
+ }
+
+ /* Register the transport with MessageQ */
+ status = messageq_register_transport((void *)handle, proc_id,
+ (u32)params->priority);
+ if (status >= 0) {
+ messageq_transportshm_state.transports
+ [proc_id][params->priority] = (void *)handle;
+ handle->status = messageq_transportshm_status_UP;
+ }
+ return handle;
+
+listmp_open_fail:
+ printk(KERN_ERR "messageq_transportshm_create: "
+ "listmp_sharedmemory_open failed!\n");
+notify_register_fail:
+ if (status < 0) {
+ if (handle != NULL)
+ messageq_transportshm_delete((void **)(&handle));
+ }
+
+exit:
+ printk(KERN_ERR "messageq_transportshm_create failed: status = 0x%x\n",
+ status);
+ return handle;
+}
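+
+/*
+ * Illustrative create/delete sequence (sketch only; shared_mem_base,
+ * notify_drv, EVENT_NO and remote_proc_id are placeholders, the rest is
+ * from this module):
+ *
+ *   struct messageq_transportshm_params params;
+ *   void *transport;
+ *
+ *   messageq_transportshm_params_init(NULL, &params);
+ *   params.shared_addr = shared_mem_base;
+ *   params.shared_addr_size =
+ *                   messageq_transportshm_shared_mem_req(&params);
+ *   params.notify_driver = notify_drv;
+ *   params.notify_event_no = EVENT_NO;
+ *   transport = messageq_transportshm_create(remote_proc_id, &params);
+ *   ...
+ *   messageq_transportshm_delete(&transport);
+ */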
+
+/*
+ * ======== messageq_transportshm_delete ========
+ * Purpose:
+ * Delete instance
+ */
+int messageq_transportshm_delete(void **mqtshm_handleptr)
+{
+ int status = 0;
+ int tmpstatus = 0;
+ struct messageq_transportshm_object *obj;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(messageq_transportshm_state.ref_count),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(mqtshm_handleptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(*mqtshm_handleptr == NULL)) {
+ /* @retval MESSAGEQTRANSPORTSHM_E_HANDLE Invalid NULL handle
+ specified */
+ status = MESSAGEQ_TRANSPORTSHM_E_HANDLE;
+ printk(KERN_WARNING "messageq_transportshm_delete: Invalid NULL"
+ " mqtshm_handle specified! status = 0x%x\n", status);
+ goto exit;
+ }
+
+ obj = (struct messageq_transportshm_object *) (*mqtshm_handleptr);
+ /* Clear handle in the local array */
+ messageq_transportshm_state.transports[obj->proc_id][obj->priority] = \
+ NULL;
+ obj->attrs[obj->my_index]->flag = 0;
+ status = listmp_sharedmemory_delete(
+ (listmp_sharedmemory_handle *)&obj->my_listmp_handle);
+ if (status < 0) {
+ printk(KERN_WARNING "messageq_transportshm_delete: Failed to "
+ "delete listmp_sharedmemory instance!\n");
+ }
+
+ tmpstatus = listmp_sharedmemory_close(
+ (listmp_sharedmemory_handle) obj->remote_listmp_handle);
+ if ((tmpstatus < 0) && (status >= 0)) {
+ status = tmpstatus;
+ printk(KERN_WARNING "messageq_transportshm_delete: Failed to "
+ "close listmp_sharedmemory instance!\n");
+ }
+
+ tmpstatus = messageq_unregister_transport(obj->proc_id,
+ obj->params.priority);
+ if ((tmpstatus < 0) && (status >= 0)) {
+ status = tmpstatus;
+ printk(KERN_WARNING "messageq_transportshm_delete: Failed to "
+ "unregister transport!\n");
+ }
+
+ tmpstatus = notify_unregister_event(obj->notify_driver, obj->proc_id,
+ obj->notify_event_no,
+ _messageq_transportshm_notify_fxn,
+ (void *)obj);
+ if ((tmpstatus < 0) && (status >= 0)) {
+ status = tmpstatus;
+ printk(KERN_WARNING "messageq_transportshm_delete: Failed to "
+ "unregister notify event!\n");
+ }
+
+ kfree(obj);
+ *mqtshm_handleptr = NULL;
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "messageq_transportshm_delete failed: "
+ "status = 0x%x\n", status);
+ return status;
+}
+
+/*
+ * ======== messageq_transportshm_put ========
+ * Purpose:
+ * Put a message onto the remote list
+ */
+int messageq_transportshm_put(void *mqtshm_handle,
+ void *msg)
+{
+ int status = 0;
+ struct messageq_transportshm_object *obj = \
+ (struct messageq_transportshm_object *) mqtshm_handle;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(messageq_transportshm_state.ref_count),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ MESSAGEQTRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ BUG_ON(mqtshm_handle == NULL);
+ if (WARN_ON(msg == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(obj == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ status = listmp_put_tail(obj->remote_listmp_handle,
+ (struct listmp_elem *) msg);
+ if (status < 0) {
+ /* @retval MESSAGEQ_TRANSPORTSHM_E_FAIL
+ * Notification to remote processor failed!
+ */
+ status = MESSAGEQ_TRANSPORTSHM_E_FAIL;
+ printk(KERN_ERR "messageq_transportshm_put: Failed to put "
+ "message in the shared list! status = 0x%x\n", status);
+ goto exit;
+ }
+
+ status = notify_sendevent(obj->notify_driver, obj->proc_id,
+ obj->notify_event_no, 0, true);
+ if (status < 0)
+ goto notify_send_fail;
+ else
+ goto exit;
+
+notify_send_fail:
+ printk(KERN_ERR "messageq_transportshm_put: Notification to remote "
+ "processor failed, status = 0x%x\n", status);
+ /* If sending the event failed, then remove the element from the list.*/
+ /* Ignore the status of remove. */
+ listmp_remove(obj->remote_listmp_handle, (struct listmp_elem *) msg);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "messageq_transportshm_put failed: "
+ "status = 0x%x\n", status);
+ return status;
+}
+
+/*
+ * ======== messageq_transportshm_control ========
+ * Purpose:
+ * Control function (not supported by this transport)
+ */
+int messageq_transportshm_control(void *mqtshm_handle, u32 cmd, u32 *cmdArg)
+{
+ BUG_ON(mqtshm_handle == NULL);
+
+ printk(KERN_ALERT "messageq_transportshm_control not supported!\n");
+ return MESSAGEQTRANSPORTSHM_E_NOTSUPPORTED;
+}
+
+/*
+ * ======== messageq_transportshm_get_status ========
+ * Purpose:
+ * Get status
+ */
+enum messageq_transportshm_status messageq_transportshm_get_status(
+ void *mqtshm_handle)
+{
+ struct messageq_transportshm_object *obj = \
+ (struct messageq_transportshm_object *) mqtshm_handle;
+
+ BUG_ON(obj == NULL);
+
+ return obj->status;
+}
+
+/*
+ * ======== messageq_transportshm_shared_mem_req ========
+ * Purpose:
+ * Get shared memory requirements.
+ */
+u32 messageq_transportshm_shared_mem_req(const
+ struct messageq_transportshm_params *params)
+{
+ u32 total_size;
+ listmp_sharedmemory_params listmp_params;
+ u32 listmp_size;
+
+ /* There are two transport flags in shared memory */
+ total_size = 2 * MESSAGEQ_TRANSPORTSHM_CACHESIZE;
+
+ listmp_sharedmemory_params_init(NULL, &listmp_params);
+ listmp_size = listmp_sharedmemory_shared_memreq(&listmp_params);
+
+ /* MyList */
+ total_size += listmp_size;
+
+ /* RemoteList */
+ total_size += listmp_size;
+
+ return total_size;
+}
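+
+/*
+ * Resulting shared memory layout, as laid out by
+ * messageq_transportshm_create() above (sizes in bytes,
+ * MESSAGEQ_TRANSPORTSHM_CACHESIZE is 128):
+ *
+ *   [attrs[0]] [attrs[1]] [listmp for slot 0] [listmp for slot 1]
+ *    128        128        listmp_size         listmp_size
+ */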
+
+
+/* =============================================================================
+ * internal functions
+ * =============================================================================
+ */
+/*
+ * ======== _messageq_transportshm_notify_fxn ========
+ * Purpose:
+ * Callback function registered with the Notify module.
+ */
+static void _messageq_transportshm_notify_fxn(u16 proc_id, u32 event_no,
+ void *arg, u32 payload)
+{
+ struct messageq_transportshm_object *obj = NULL;
+ messageq_msg msg = NULL;
+ u32 queue_id;
+
+ if (WARN_ON(arg == NULL))
+ goto exit;
+
+ obj = (struct messageq_transportshm_object *)arg;
+ /* While there are messages, get them out and send them to
+ * their final destination. */
+ while ((msg = (messageq_msg) listmp_get_head(obj->my_listmp_handle))
+ != NULL) {
+ /* Get the destination message queue Id */
+ queue_id = messageq_get_dst_queue(msg);
+ messageq_put(queue_id, msg);
+ }
+ return;
+
+exit:
+ printk(KERN_ERR "messageq_transportshm_notify_fxn: argument passed is "
+ "NULL!\n");
+}
+
+
+/*
+ * ======== messageq_transportshm_set_err_fxn ========
+ * Purpose:
+ * This will set the asynchronous error function for the transport module
+ */
+void messageq_transportshm_set_err_fxn(
+ void (*err_fxn)(
+ enum MessageQTransportShm_Reason reason,
+ void *handle,
+ void *msg,
+ u32 info))
+{
+ int key;
+
+ key = mutex_lock_interruptible(messageq_transportshm_state.gate_handle);
+ if (key < 0)
+ goto exit;
+
+ messageq_transportshm_state.cfg.err_fxn = err_fxn;
+ mutex_unlock(messageq_transportshm_state.gate_handle);
+
+exit:
+ return;
+}
+
+
diff --git a/drivers/dsp/syslink/multicore_ipc/messageq_transportshm_ioctl.c b/drivers/dsp/syslink/multicore_ipc/messageq_transportshm_ioctl.c
new file mode 100644
index 000000000000..2b4af6192f27
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/messageq_transportshm_ioctl.c
@@ -0,0 +1,334 @@
+/*
+ * messageq_transportshm_ioctl.c
+ *
+ * This file implements all the ioctl operations required on the
+ * messageq_transportshm module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Linux headers */
+#include <linux/uaccess.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+
+/* Module Headers */
+#include <messageq.h>
+#include <messageq_transportshm.h>
+#include <messageq_transportshm_ioctl.h>
+#include <sharedregion.h>
+
+/*
+ * ======== messageq_transportshm_ioctl_get_config ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_transportshm_get_config
+ * function
+ */
+static inline int messageq_transportshm_ioctl_get_config(
+ struct messageq_transportshm_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct messageq_transportshm_config config;
+
+ messageq_transportshm_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct messageq_transportshm_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_transportshm_ioctl_setup ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_transportshm_setup function
+ */
+static inline int messageq_transportshm_ioctl_setup(
+ struct messageq_transportshm_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct messageq_transportshm_config config;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct messageq_transportshm_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = messageq_transportshm_setup(&config);
+
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_transportshm_ioctl_destroy ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_transportshm_destroy function
+ */
+static inline int messageq_transportshm_ioctl_destroy(
+ struct messageq_transportshm_cmd_args *cargs)
+{
+ cargs->api_status = messageq_transportshm_destroy();
+ return 0;
+}
+
+/*
+ * ======== messageq_transportshm_ioctl_params_init ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_transportshm_params_init
+ * function
+ */
+static inline int messageq_transportshm_ioctl_params_init(
+ struct messageq_transportshm_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ struct messageq_transportshm_params params;
+
+ messageq_transportshm_params_init(
+ cargs->args.params_init.messageq_transportshm_handle, &params);
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct messageq_transportshm_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = status;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_transportshm_ioctl_create ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_transportshm_create function
+ */
+static inline int messageq_transportshm_ioctl_create(
+ struct messageq_transportshm_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct messageq_transportshm_params params;
+
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct messageq_transportshm_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ params.shared_addr = sharedregion_get_ptr(
+ (u32 *)cargs->args.create.shared_addr_srptr);
+ if (unlikely(params.shared_addr == NULL))
+ goto exit;
+
+ params.gate = cargs->args.create.knl_lock_handle;
+ params.notify_driver = cargs->args.create.knl_notify_driver;
+ cargs->args.create.messageq_transportshm_handle = \
+ messageq_transportshm_create(cargs->args.create.proc_id,
+ &params);
+
+ /*
+ * Here we are not validating the return value from the module.
+ * Even if it is NULL, we pass it to the user side, and the user
+ * side has to return a proper status to the application.
+ */
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_transportshm_ioctl_delete ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_transportshm_delete function
+ */
+static inline int messageq_transportshm_ioctl_delete(
+ struct messageq_transportshm_cmd_args *cargs)
+{
+ cargs->api_status = messageq_transportshm_delete(
+ &(cargs->args.delete_transport.messageq_transportshm_handle));
+ return 0;
+}
+
+/*
+ * ======== messageq_transportshm_ioctl_put ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_transportshm_put function
+ */
+static inline int messageq_transportshm_ioctl_put(
+ struct messageq_transportshm_cmd_args *cargs)
+{
+ int status = 0;
+ messageq_msg msg;
+
+ msg = (messageq_msg) sharedregion_get_ptr(cargs->args.put.msg_srptr);
+ if (unlikely(msg == NULL))
+ goto exit;
+
+ status = messageq_transportshm_put(
+ cargs->args.put.messageq_transportshm_handle, msg);
+
+ cargs->api_status = status;
+exit:
+ return 0;
+}
+
+/*
+ * ======== messageq_transportshm_ioctl_get_status ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_transportshm_get_status
+ * function
+ */
+static inline int messageq_transportshm_ioctl_get_status(
+ struct messageq_transportshm_cmd_args *cargs)
+{
+ cargs->args.get_status.status = \
+ messageq_transportshm_get_status(
+ cargs->args.get_status.messageq_transportshm_handle);
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== messageq_transportshm_ioctl_shared_memreq ========
+ * Purpose:
+ * This is the ioctl interface to the messageq_transportshm_shared_mem_req
+ * function
+ */
+static inline int messageq_transportshm_ioctl_shared_memreq(
+ struct messageq_transportshm_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct messageq_transportshm_params params;
+
+ size = copy_from_user(&params, cargs->args.shared_memreq.params,
+ sizeof(struct messageq_transportshm_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->args.shared_memreq.bytes =
+ messageq_transportshm_shared_mem_req(&params);
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_transportshm_ioctl ========
+ * Purpose:
+ * ioctl interface function for the messageq_transportshm module
+ */
+int messageq_transportshm_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int os_status = 0;
+ struct messageq_transportshm_cmd_args __user *uarg =
+ (struct messageq_transportshm_cmd_args __user *)args;
+ struct messageq_transportshm_cmd_args cargs;
+ unsigned long size;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ os_status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ os_status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+ if (os_status) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct messageq_transportshm_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_MESSAGEQ_TRANSPORTSHM_GETCONFIG:
+ os_status = messageq_transportshm_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_TRANSPORTSHM_SETUP:
+ os_status = messageq_transportshm_ioctl_setup(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_TRANSPORTSHM_DESTROY:
+ os_status = messageq_transportshm_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_TRANSPORTSHM_PARAMS_INIT:
+ os_status = messageq_transportshm_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_TRANSPORTSHM_CREATE:
+ os_status = messageq_transportshm_ioctl_create(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_TRANSPORTSHM_DELETE:
+ os_status = messageq_transportshm_ioctl_delete(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_TRANSPORTSHM_PUT:
+ os_status = messageq_transportshm_ioctl_put(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_TRANSPORTSHM_GETSTATUS:
+ os_status = messageq_transportshm_ioctl_get_status(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_TRANSPORTSHM_SHAREDMEMREQ:
+ os_status = messageq_transportshm_ioctl_shared_memreq(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ os_status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ os_status = -ERESTARTSYS;
+
+ if (os_status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs,
+ sizeof(struct messageq_transportshm_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+ return os_status;
+
+exit:
+ printk(KERN_ERR "messageq_transportshm_ioctl failed: status = 0x%x\n",
+ os_status);
+ return os_status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/multiproc.c b/drivers/dsp/syslink/multicore_ipc/multiproc.c
new file mode 100755
index 000000000000..17a3bce19113
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/multiproc.c
@@ -0,0 +1,242 @@
+/*
+* multiproc.c
+*
+* Many multi-processor modules have the concept of processor id. MultiProc
+* centralizes the processor id management.
+*
+* Copyright (C) 2008-2009 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE.
+*/
+
+/*
+ * ======== multiproc.c ========
+ * Notes:
+ * Processor ids start at 0 and ascend without skipping values, up to
+ * max_processors - 1.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <syslink/atomic_linux.h>
+/* Utilities headers */
+#include <linux/string.h>
+
+/* Module level headers */
+#include <multiproc.h>
+
+/* Macro to make a correct module magic number with ref_count */
+#define MULTIPROC_MAKE_MAGICSTAMP(x) ((MULTIPROC_MODULEID << 12u) | (x))
+
+/*
+ * multiproc module state object
+ */
+struct multiproc_module_object {
+ struct multiproc_config cfg; /* Module configuration structure */
+ struct multiproc_config def_cfg; /* Default module configuration */
+ atomic_t ref_count; /* Reference count */
+};
+
+static struct multiproc_module_object multiproc_state = {
+ .def_cfg.max_processors = 4,
+ .def_cfg.name_list[0][0] = "MPU",
+ .def_cfg.name_list[1][0] = "Tesla",
+ .def_cfg.name_list[2][0] = "SysM3",
+ .def_cfg.name_list[3][0] = "AppM3",
+ .def_cfg.id = 0
+};
+
+/*
+ * ======== multiproc_get_config ========
+ * Purpose:
+ * This will get the default configuration for the multiproc module
+ */
+void multiproc_get_config(struct multiproc_config *cfg)
+{
+ BUG_ON(cfg == NULL);
+ if (atomic_cmpmask_and_lt(
+ &(multiproc_state.ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true) {
+ /* (If setup has not yet been called) */
+ memcpy(cfg, &multiproc_state.def_cfg,
+ sizeof(struct multiproc_config));
+ } else {
+ memcpy(cfg, &multiproc_state.cfg,
+ sizeof(struct multiproc_config));
+ }
+}
+EXPORT_SYMBOL(multiproc_get_config);
+
+/*
+ * ======== multiproc_setup ========
+ * Purpose:
+ * This function sets up the multiproc module. This function
+ * must be called before any other instance-level APIs can be
+ * invoked
+ */
+s32 multiproc_setup(struct multiproc_config *cfg)
+{
+ s32 status = 0;
+ struct multiproc_config tmp_cfg;
+
+ /* This sets the ref_count variable if it is not initialized; the upper
+ * 16 bits are written with the module id to ensure correctness of the
+ * ref_count variable.
+ */
+ atomic_cmpmask_and_set(&multiproc_state.ref_count,
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&multiproc_state.ref_count)
+ != MULTIPROC_MAKE_MAGICSTAMP(1u)) {
+ status = 1;
+ } else {
+ if (cfg == NULL) {
+ multiproc_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ memcpy(&multiproc_state.cfg, cfg,
+ sizeof(struct multiproc_config));
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(multiproc_setup);
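+
+/*
+ * Typical bring-up sequence (illustrative sketch; LOCAL_PROC_ID is a
+ * placeholder for the id of the processor this code runs on):
+ *
+ *   struct multiproc_config cfg;
+ *
+ *   multiproc_get_config(&cfg);
+ *   multiproc_setup(&cfg);
+ *   multiproc_set_local_id(LOCAL_PROC_ID);
+ */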
+
+/*
+ * ======== multiproc_destroy ========
+ * Purpose:
+ * This function destroys the multiproc module.
+ * Once this function is called, other multiproc module APIs,
+ * except for the multiproc_get_config API, cannot be called
+ * anymore.
+ */
+s32 multiproc_destroy(void)
+{
+ int status = 0;
+
+ if (atomic_cmpmask_and_lt(
+ &(multiproc_state.ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ atomic_dec_return(&multiproc_state.ref_count);
+
+exit:
+ return status;
+}
+EXPORT_SYMBOL(multiproc_destroy);
+
+/*
+ * ======== multiproc_set_local_id ========
+ * Purpose:
+ * This will set the processor id of the local processor at run time
+ */
+int multiproc_set_local_id(u16 proc_id)
+{
+ int status = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(multiproc_state.ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(proc_id >= MULTIPROC_MAXPROCESSORS)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ multiproc_state.cfg.id = proc_id;
+
+exit:
+ return status;
+}
+EXPORT_SYMBOL(multiproc_set_local_id);
+
+/*
+ * ======== multiproc_get_id ========
+ * Purpose:
+ * This will get the processor id from the processor name
+ */
+u16 multiproc_get_id(const char *proc_name)
+{
+ s32 i;
+ u16 proc_id = MULTIPROC_INVALIDID;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(multiproc_state.ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ /* If the name is NULL, just return the local id */
+ if (proc_name == NULL)
+ proc_id = multiproc_state.cfg.id;
+ else {
+ for (i = 0; i < multiproc_state.cfg.max_processors ; i++) {
+ if (strcmp(proc_name,
+ &multiproc_state.cfg.name_list[i][0]) == 0) {
+ proc_id = i;
+ break;
+ }
+ }
+ }
+
+exit:
+ return proc_id;
+}
+EXPORT_SYMBOL(multiproc_get_id);
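+
+/*
+ * Example lookups, assuming the default name_list above is in use
+ * (illustrative only):
+ *
+ *   multiproc_get_id("SysM3")  returns 2
+ *   multiproc_get_id(NULL)     returns the local processor id
+ *   multiproc_get_name(2)      returns "SysM3"
+ */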
+
+/*
+ * ======== multiproc_get_name ========
+ * Purpose:
+ * This will get the processor name from the processor id
+ */
+char *multiproc_get_name(u16 proc_id)
+{
+ char *proc_name = NULL;
+
+ /* On error condition return NULL pointer, else entry from name list */
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(multiproc_state.ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(proc_id >= MULTIPROC_MAXPROCESSORS))
+ goto exit;
+
+ proc_name = multiproc_state.cfg.name_list[proc_id];
+
+exit:
+ return proc_name;
+}
+EXPORT_SYMBOL(multiproc_get_name);
+
+/*
+ * ======== multiproc_get_max_processors ========
+ * Purpose:
+ * This will return the maximum number of processors in the system
+ */
+u16 multiproc_get_max_processors(void)
+{
+ return multiproc_state.cfg.max_processors;
+}
+EXPORT_SYMBOL(multiproc_get_max_processors);
+
diff --git a/drivers/dsp/syslink/multicore_ipc/multiproc_ioctl.c b/drivers/dsp/syslink/multicore_ipc/multiproc_ioctl.c
new file mode 100755
index 000000000000..8f36304f3397
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/multiproc_ioctl.c
@@ -0,0 +1,171 @@
+/*
+* multiproc_ioctl.c
+*
+* This provides the ioctl interface for the multiproc module
+*
+* Copyright (C) 2008-2009 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE.
+*/
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <multiproc.h>
+#include <multiproc_ioctl.h>
+
+/*
+ * ======== mproc_ioctl_setup ========
+ * Purpose:
+ * This wrapper function will call the multiproc function
+ * to set up the module
+ */
+static int mproc_ioctl_setup(struct multiproc_cmd_args *cargs)
+{
+ struct multiproc_config config;
+ s32 status = 0;
+ ulong size;
+
+ size = copy_from_user(&config,
+ cargs->args.setup.config,
+ sizeof(struct multiproc_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = multiproc_setup(&config);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== mproc_ioctl_destroy ========
+ * Purpose:
+ * This wrapper function will call the multiproc function
+ * to destroy the module
+ */
+static int mproc_ioctl_destroy(struct multiproc_cmd_args *cargs)
+{
+ cargs->api_status = multiproc_destroy();
+ return 0;
+}
+
+/*
+ * ======== mproc_ioctl_get_config ========
+ * Purpose:
+ * This wrapper function will call the multiproc function
+ * to get the default configuration of the module
+ */
+static int mproc_ioctl_get_config(struct multiproc_cmd_args *cargs)
+{
+ struct multiproc_config config;
+ u32 size;
+
+ multiproc_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct multiproc_config));
+ if (size) {
+ cargs->api_status = -EFAULT;
+ return 0;
+ }
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== multiproc_ioctl_set_local_id ========
+ * Purpose:
+ * This wrapper function will call the multiproc function
+ * to set the local processor id
+ */
+static int multiproc_ioctl_set_local_id(struct multiproc_cmd_args *cargs)
+{
+ cargs->api_status = multiproc_set_local_id(cargs->args.set_local_id.id);
+ return 0;
+}
+
+/*
+ * ======== multiproc_ioctl ========
+ * Purpose:
+ * This is the ioctl interface for the multiproc module
+ */
+int multiproc_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct multiproc_cmd_args __user *uarg =
+ (struct multiproc_cmd_args __user *)args;
+ struct multiproc_cmd_args cargs;
+
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct multiproc_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_MULTIPROC_SETUP:
+ status = mproc_ioctl_setup(&cargs);
+ break;
+
+ case CMD_MULTIPROC_DESTROY:
+ status = mproc_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_MULTIPROC_GETCONFIG:
+ status = mproc_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_MULTIPROC_SETLOCALID:
+ status = multiproc_ioctl_set_local_id(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ status = -ERESTARTSYS;
+
+ if (status < 0)
+ goto exit;
+
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct multiproc_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+
+}
+
diff --git a/drivers/dsp/syslink/multicore_ipc/nameserver.c b/drivers/dsp/syslink/multicore_ipc/nameserver.c
new file mode 100644
index 000000000000..12261f9b0506
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/nameserver.c
@@ -0,0 +1,1005 @@
+/*
+ * nameserver.c
+ *
+ * The nameserver module manages local name/value pairs that
+ * enable an application and other modules to store and retrieve
+ * values based on a name.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <syslink/atomic_linux.h>
+
+#include <nameserver.h>
+#include <multiproc.h>
+#include <nameserver_remote.h>
+
+#define NS_MAX_NAME_LEN 32
+#define NS_MAX_RUNTIME_ENTRY (~0)
+#define NS_MAX_VALUE_LEN 4
+
+/*
+ * The dynamic name/value table looks like the following. This approach allows
+ * each instance table to have different value lengths and different name
+ * lengths. The names block is allocated on create. The size of that block is
+ * (max_runtime_entries * max_name_len). That block is sliced and diced up and
+ * given to each table entry.
+ * The same thing is done for the values block.
+ *
+ * names table values
+ * ------------- ------------- -------------
+ * | |<-\ | elem | /----->| |
+ * | | \-------| name | / | |
+ * | | | value |-/ | |
+ * | | | len | | |
+ * | |<-\ |-----------| | |
+ * | | \ | elem | | |
+ * | | \------| name | /------>| |
+ * | | | value |-/ | |
+ * ------------- | len | | |
+ * ------------- | |
+ * | |
+ * | |
+ * -------------
+ *
+ * There is an optimization for small values (e.g. <= sizeof(u32)).
+ * In this case, there is no values block allocated. Instead the value
+ * field is used directly. This optimization occurs and is managed when
+ * obj->max_value_len <= sizeof(u32).
+ *
+ * The static create is a little different. The static entries point directly
+ * to a name string (and value). Since it points directly to static items,
+ * these entries cannot be removed.
+ * If max_runtime_entries is non-zero, a names and values block is created.
+ * Here is an example of a table with 1 static entry and 2 dynamic entries
+ *
+ * ------------
+ * | elem |
+ * "myName" <-----------| name |----------> someValue
+ * | value |
+ * names | len | values
+ * ------------- ------------- -------------
+ * | |<-\ | elem | /----->| |
+ * | | \-------| name | / | |
+ * | | | value |-/ | |
+ * | | | len | | |
+ * | |<-\ |-----------| | |
+ * | | \ | elem | | |
+ * | | \------| name | /------>| |
+ * | | | value |-/ | |
+ * ------------- | len | | |
+ * ------------- | |
+ * | |
+ * | |
+ * -------------
+ *
+ * NameServer uses a freeList and a namelist to maintain the empty
+ * and filled-in entries. So when a name/value pair is added, an entry
+ * is pulled off the freeList, filled in and placed on the namelist.
+ * The reverse happens on a remove.
+ *
+ * For static adds, the entries are placed on the namelist statically.
+ *
+ * For dynamic creates, the freeList is populated in postInit and there are no
+ * entries placed on the namelist (this happens when the add is called).
+ *
+ */
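+
+/*
+ * Illustrative usage sketch of this module (error handling omitted;
+ * "my_server", "entry0" and value are placeholder names, the functions
+ * and parameter fields are from this file):
+ *
+ *   struct nameserver_params params;
+ *   void *ns;
+ *   u32 value = 0x1234;
+ *
+ *   nameserver_setup();
+ *   nameserver_params_init(&params);
+ *   params.max_value_len = sizeof(u32);
+ *   ns = nameserver_create("my_server", &params);
+ *   nameserver_add(ns, "entry0", &value, sizeof(value));
+ */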
+
+/* Macro to make a correct module magic number with refCount */
+#define NAMESERVER_MAKE_MAGICSTAMP(x) ((NAMESERVER_MODULEID << 12u) | (x))
+
+/*
+ * A name/value table entry
+ */
+struct nameserver_entry {
+ struct list_head elem; /* List element */
+ u32 hash; /* Hash value */
+ char *name; /* Name portion of name/value pair */
+ u32 len; /* Length of the value field. */
+ void *buf; /* Value portion of name/value entry */
+ bool collide; /* Does the hash collide? */
+ struct nameserver_entry *next; /* Pointer to the next entry,
+ used in case of collision only */
+};
+
+/*
+ * A nameserver instance object
+ */
+struct nameserver_object {
+ struct list_head elem;
+ char *name; /* Name of the instance */
+ u32 count; /* Counter for entries */
+ struct mutex *gate_handle; /* Gate for critical regions */
+ struct list_head name_list; /* Filled entries list */
+ struct nameserver_params params; /* The parameter structure */
+};
+
+
+/* nameserver module state object */
+struct nameserver_module_object {
+ struct list_head obj_list;
+ struct mutex *list_lock;
+ struct nameserver_remote_object **remote_handle_list;
+ atomic_t ref_count;
+};
+
+/*
+ * Variable for holding state of the nameserver module.
+ */
+struct nameserver_module_object nameserver_state = {
+ .obj_list = LIST_HEAD_INIT(nameserver_state.obj_list),
+ .list_lock = NULL,
+ .remote_handle_list = NULL,
+};
+
+/*
+ * Lookup table for CRC calculation.
+ */
+static const u32 string_crc_table[256u] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
+ 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
+ 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+ 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+ 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
+ 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
+ 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
+ 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+ 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
+ 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
+ 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+ 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
+ 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+ 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
+ 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
+ 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
+ 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
+ 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
+ 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
+ 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
+ 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
+};
+
+/*
+ * ======== nameserver_string_hash ========
+ * Purpose:
+ * This will calculate the hash for a string
+ */
+static u32 nameserver_string_hash(const char *string)
+{
+ u32 i;
+ u32 hash ;
+ u32 len = strlen(string);
+
+ for (i = 0, hash = len; i < len; i++)
+ hash = (hash >> 8) ^
+ string_crc_table[(hash & 0xff)] ^ string[i];
+
+ return hash;
+}
+
+/*
+ * ======== nameserver_setup ========
+ * Purpose:
+ * This will setup the nameserver module
+ */
+int nameserver_setup(void)
+{
+ struct nameserver_remote_object **list = NULL;
+ s32 retval = 0;
+ u16 nr_procs = 0;
+
+ /* This sets the ref_count variable if it is not initialized; the upper
+ * 16 bits are written with the module id to ensure correctness of the
+ * ref_count variable
+ */
+ atomic_cmpmask_and_set(&nameserver_state.ref_count,
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&nameserver_state.ref_count)
+ != NAMESERVER_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ nr_procs = multiproc_get_max_processors();
+ list = kmalloc(nr_procs * sizeof(struct nameserver_remote_object *),
+ GFP_KERNEL);
+ if (list == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ memset(list , 0, nr_procs * sizeof(struct nameserver_remote_object *));
+ nameserver_state.remote_handle_list = list;
+ nameserver_state.list_lock = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (nameserver_state.list_lock == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ /* mutex is initialized with state = UNLOCKED */
+ mutex_init(nameserver_state.list_lock);
+ return 0;
+
+error:
+ kfree(list);
+ printk(KERN_ERR "nameserver_setup failed, retval: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_setup);
+
+/*
+ * ======== nameserver_destroy ========
+ * Purpose:
+ * This will destroy the nameserver module
+ */
+int nameserver_destroy(void)
+{
+ s32 retval = 0;
+ struct mutex *lock = NULL;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(nameserver_state.ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&nameserver_state.ref_count)
+ == NAMESERVER_MAKE_MAGICSTAMP(0))) {
+ retval = 1;
+ goto exit;
+ }
+
+ if (WARN_ON(nameserver_state.list_lock == NULL)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ /* If a nameserver instance exists, do not proceed */
+ if (!list_empty(&nameserver_state.obj_list)) {
+ retval = -EBUSY;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(nameserver_state.list_lock);
+ if (retval)
+ goto exit;
+
+ lock = nameserver_state.list_lock;
+ nameserver_state.list_lock = NULL;
+ mutex_unlock(lock);
+ kfree(lock);
+ kfree(nameserver_state.remote_handle_list);
+ nameserver_state.remote_handle_list = NULL;
+ return 0;
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "nameserver_destroy failed, retval: %x\n",
+ retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_destroy);
+
+/*
+ * ======== nameserver_params_init ========
+ * Purpose:
+ * Initialize this config-params structure with supplier-specified
+ * defaults before instance creation.
+ */
+int nameserver_params_init(struct nameserver_params *params)
+{
+ BUG_ON(params == NULL);
+ params->check_existing = true;
+ params->gate_handle = NULL;
+ params->max_name_len = NS_MAX_NAME_LEN;
+ params->max_runtime_entries = NS_MAX_RUNTIME_ENTRY;
+ params->max_value_len = NS_MAX_VALUE_LEN;
+ params->table_heap = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(nameserver_params_init);
+
+/*
+ * ======== nameserver_get_params ========
+ * Purpose:
+ * This will initialize config-params structure with
+ * supplier-specified defaults before instance creation
+ */
+int nameserver_get_params(void *handle,
+ struct nameserver_params *params)
+{
+ struct nameserver_object *nshandle = NULL;
+
+ BUG_ON(params == NULL);
+ if (handle == NULL) {
+ params->check_existing = true;
+ params->max_name_len = NS_MAX_NAME_LEN;
+ params->max_runtime_entries = NS_MAX_RUNTIME_ENTRY;
+ params->max_value_len = NS_MAX_VALUE_LEN;
+ params->gate_handle = NULL;
+ params->table_heap = NULL;
+ } else {
+ nshandle = (struct nameserver_object *)handle;
+ params->check_existing = nshandle->params.check_existing;
+ params->max_name_len = nshandle->params.max_name_len;
+ params->max_runtime_entries =
+ nshandle->params.max_runtime_entries;
+ params->max_value_len = nshandle->params.max_value_len;
+ params->gate_handle = nshandle->params.gate_handle;
+ params->table_heap = nshandle->params.table_heap;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(nameserver_get_params);
+
+/*
+ * ======== nameserver_get_handle ========
+ * Purpose:
+ * This will get the handle of a nameserver instance
+ * from its name
+ */
+void *nameserver_get_handle(const char *name)
+{
+ struct nameserver_object *obj = NULL;
+
+ BUG_ON(name == NULL);
+ list_for_each_entry(obj, &nameserver_state.obj_list, elem) {
+ if (strcmp(obj->name, name) == 0)
+ goto success;
+ }
+ return NULL;
+
+success:
+ return (void *)obj;
+}
+EXPORT_SYMBOL(nameserver_get_handle);
+
+/*
+ * ======== nameserver_create ========
+ * Purpose:
+ * This will create a name server instance
+ */
+void *nameserver_create(const char *name,
+ const struct nameserver_params *params)
+{
+ struct nameserver_object *new_obj = NULL;
+ u32 name_len;
+ s32 retval = 0;
+
+ BUG_ON(name == NULL);
+ BUG_ON(params == NULL);
+
+ name_len = strlen(name) + 1;
+ if (name_len > params->max_name_len) {
+ retval = -E2BIG;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(nameserver_state.list_lock);
+ if (retval)
+ goto exit;
+
+ /* check if the name is already registered or not */
+ new_obj = nameserver_get_handle(name);
+ if (new_obj != NULL) {
+ retval = -EEXIST;
+ goto error_handle;
+ }
+
+ new_obj = kmalloc(sizeof(struct nameserver_object), GFP_KERNEL);
+ if (new_obj == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ new_obj->name = kmalloc(name_len, GFP_ATOMIC);
+ if (new_obj->name == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ strncpy(new_obj->name, name, name_len);
+ memcpy(&new_obj->params, params,
+ sizeof(struct nameserver_params));
+ if (params->max_value_len < sizeof(u32))
+ new_obj->params.max_value_len = sizeof(u32);
+ else
+ new_obj->params.max_value_len = params->max_value_len;
+
+ new_obj->gate_handle =
+ kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (new_obj->gate_handle == NULL) {
+ retval = -ENOMEM;
+ goto error_mutex;
+ }
+
+ mutex_init(new_obj->gate_handle);
+ new_obj->count = 0;
+ /* Put the nameserver instance into the local list */
+ INIT_LIST_HEAD(&new_obj->name_list);
+ list_add_tail(&new_obj->elem, &nameserver_state.obj_list);
+ mutex_unlock(nameserver_state.list_lock);
+ return (void *)new_obj;
+
+error_mutex:
+ kfree(new_obj->name);
+error:
+ kfree(new_obj);
+error_handle:
+ mutex_unlock(nameserver_state.list_lock);
+exit:
+ printk(KERN_ERR "nameserver_create failed retval:%x \n", retval);
+ return NULL;
+}
+EXPORT_SYMBOL(nameserver_create);
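A minimal in-kernel usage sketch of the creation path shown above (illustrative only; the instance name, the overridden limit and the error handling are assumptions, not part of this patch):

        struct nameserver_params params;
        void *ns_handle;

        nameserver_params_init(&params);
        params.max_runtime_entries = 8;  /* example override of a default */
        ns_handle = nameserver_create("ExampleServer", &params);
        if (ns_handle == NULL)
                return -EINVAL;  /* nameserver_create already logged the failure */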
+
+
+/*
+ * ======== nameserver_delete ========
+ * Purpose:
+ * This will delete a name server instance
+ */
+int nameserver_delete(void **handle)
+{
+ struct nameserver_object *temp_obj = NULL;
+ struct mutex *gate_handle = NULL;
+ bool localgate = false;
+ s32 retval = 0;
+
+ BUG_ON(handle == NULL);
+ temp_obj = (struct nameserver_object *) (*handle);
+ retval = mutex_lock_interruptible(temp_obj->gate_handle);
+ if (retval)
+ goto exit;
+
+ /* Do not proceed if an entry is in the table */
+ if (temp_obj->count != 0) {
+ retval = -EBUSY;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(nameserver_state.list_lock);
+ if (retval)
+ goto error;
+
+ list_del(&temp_obj->elem);
+ mutex_unlock(nameserver_state.list_lock);
+ gate_handle = temp_obj->gate_handle;
+ /* free the memory allocated for instance name */
+ kfree(temp_obj->name);
+ /* Delete the lock handle if created internally */
+ if (temp_obj->params.gate_handle == NULL)
+ localgate = true;
+
+ /* Free the memory used for handle */
+ kfree(temp_obj);
+ *handle = NULL;
+ mutex_unlock(gate_handle);
+ if (localgate == true)
+ kfree(gate_handle);
+ return 0;
+
+error:
+ mutex_unlock(temp_obj->gate_handle);
+exit:
+ printk(KERN_ERR "nameserver_delete failed retval:%x \n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_delete);
+
+/*
+ * ======== nameserver_is_entry_found ========
+ * Purpose:
+ * This will return true if the entry is found in the table
+ */
+static bool nameserver_is_entry_found(const char *name, u32 hash,
+ struct list_head *list,
+ struct nameserver_entry **entry)
+{
+ struct nameserver_entry *node = NULL;
+ bool hash_match = false;
+ bool name_match = false;
+
+
+ list_for_each_entry(node, list, elem) {
+ /* If the hash does not match, take the next node */
+ if (node->hash == hash)
+ hash_match = true;
+ else
+ continue;
+ /* Check if the name also matches, in case the hash is a duplicate */
+ if (strcmp(node->name, name) == 0)
+ name_match = true;
+
+ if (hash_match && name_match) {
+ if (entry != NULL)
+ *entry = node;
+ return true;
+ }
+
+ hash_match = false;
+ name_match = false;
+ }
+ return false;
+}
+
+/*
+ * ======== nameserver_add ========
+ * Purpose:
+ * This will add an entry into a nameserver instance
+ */
+void *nameserver_add(void *handle, const char *name,
+ void *buffer, u32 length)
+{
+ struct nameserver_entry *new_node = NULL;
+ struct nameserver_object *temp_obj = NULL;
+ bool found = false;
+ u32 hash;
+ u32 name_len;
+ s32 retval = 0;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(name == NULL);
+ BUG_ON(buffer == NULL);
+ if (WARN_ON(length == 0)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ temp_obj = (struct nameserver_object *)handle;
+ retval = mutex_lock_interruptible(temp_obj->gate_handle);
+ if (retval)
+ goto exit;
+
+ if (temp_obj->count >= temp_obj->params.max_runtime_entries) {
+ retval = -ENOSPC;
+ goto error;
+ }
+
+ /* take the terminating null char into account */
+ name_len = strlen(name) + 1;
+ if (name_len > temp_obj->params.max_name_len) {
+ retval = -E2BIG;
+ goto error;
+ }
+
+ /* TODO : hash and collide ?? */
+ hash = nameserver_string_hash(name);
+ found = nameserver_is_entry_found(name, hash,
+ &temp_obj->name_list, &new_node);
+ if (found == true) {
+ retval = -EEXIST;
+ goto error_entry;
+ }
+
+ new_node = kmalloc(sizeof(struct nameserver_entry), GFP_KERNEL);
+ if (new_node == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ new_node->hash = hash;
+ new_node->collide = true;
+ new_node->len = length;
+ new_node->next = NULL;
+ new_node->name = kmalloc(name_len, GFP_KERNEL);
+ if (new_node->name == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ new_node->buf = kmalloc(length, GFP_KERNEL);
+ if (new_node->buf == NULL) {
+ retval = -ENOMEM;
+ goto error1;
+ }
+
+ strncpy(new_node->name, name, name_len);
+ memcpy(new_node->buf, buffer, length);
+ list_add_tail(&new_node->elem, &temp_obj->name_list);
+ temp_obj->count++;
+ mutex_unlock(temp_obj->gate_handle);
+ return new_node;
+
+error1:
+ kfree(new_node->name);
+error:
+ kfree(new_node);
+error_entry:
+ mutex_unlock(temp_obj->gate_handle);
+exit:
+ printk(KERN_ERR "nameserver_add failed status: %x \n", retval);
+ return NULL;
+
+}
+EXPORT_SYMBOL(nameserver_add);
+
+/*
+ * ======== nameserver_add_uint32 ========
+ * Purpose:
+ * This will add a u32 value into a nameserver instance
+ */
+void *nameserver_add_uint32(void *handle, const char *name,
+ u32 value)
+{
+ struct nameserver_entry *new_node = NULL;
+ BUG_ON(handle == NULL);
+ BUG_ON(name == NULL);
+
+ new_node = nameserver_add(handle, name, &value, sizeof(u32));
+ return new_node;
+}
+EXPORT_SYMBOL(nameserver_add_uint32);
+
+/*
+ * ======== nameserver_remove ========
+ * Purpose:
+ * This will remove a name/value pair from a name server
+ */
+int nameserver_remove(void *handle, const char *name)
+{
+ struct nameserver_object *temp_obj = NULL;
+ struct nameserver_entry *entry = NULL;
+ bool found = false;
+ u32 hash;
+ u32 name_len;
+ s32 retval = 0;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(name == NULL);
+
+ temp_obj = (struct nameserver_object *)handle;
+ name_len = strlen(name) + 1;
+ if (name_len > temp_obj->params.max_name_len) {
+ retval = -E2BIG;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(temp_obj->gate_handle);
+ if (retval)
+ goto exit;
+
+ /* TODO :check collide & hash usage */
+ hash = nameserver_string_hash(name);
+ found = nameserver_is_entry_found(name, hash,
+ &temp_obj->name_list, &entry);
+ if (found == false) {
+ retval = -ENOENT;
+ goto error;
+ }
+
+ kfree(entry->buf);
+ kfree(entry->name);
+ list_del(&entry->elem);
+ kfree(entry);
+ temp_obj->count--;
+ mutex_unlock(temp_obj->gate_handle);
+ return 0;
+
+error:
+ mutex_unlock(temp_obj->gate_handle);
+
+exit:
+ printk(KERN_ERR "nameserver_remove failed status:%x \n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_remove);
+
+/*
+ * ======== nameserver_remove_entry ========
+ * Purpose:
+ * This will remove a name/value pair from a name server
+ */
+int nameserver_remove_entry(void *nshandle, void *nsentry)
+{
+ struct nameserver_entry *node = NULL;
+ struct nameserver_object *handle = NULL;
+ s32 retval = 0;
+
+ BUG_ON(nshandle == NULL);
+ BUG_ON(nsentry == NULL);
+
+ handle = (struct nameserver_object *)nshandle;
+ node = (struct nameserver_entry *)nsentry;
+ retval = mutex_lock_interruptible(handle->gate_handle);
+ if (retval)
+ goto exit;
+
+ kfree(node->buf);
+ kfree(node->name);
+ list_del(&node->elem);
+ kfree(node);
+ handle->count--;
+ mutex_unlock(handle->gate_handle);
+ return 0;
+
+exit:
+ printk(KERN_ERR "nameserver_remove_entry failed status:%x \n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_remove_entry);
+
+
+/*
+ * ======== nameserver_get_local ========
+ * Purpose:
+ * This will retrieve the value portion of a name/value
+ * pair from local table
+ */
+int nameserver_get_local(void *handle, const char *name,
+ void *buffer, u32 length)
+{
+ struct nameserver_object *temp_obj = NULL;
+ struct nameserver_entry *entry = NULL;
+ bool found = false;
+ u32 hash;
+ s32 retval = 0;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(name == NULL);
+ BUG_ON(buffer == NULL);
+ if (WARN_ON(length == 0)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ temp_obj = (struct nameserver_object *)handle;
+ retval = mutex_lock_interruptible(temp_obj->gate_handle);
+ if (retval)
+ goto exit;
+
+ /* TODO :check collide & hash usage */
+ hash = nameserver_string_hash(name);
+ found = nameserver_is_entry_found(name, hash,
+ &temp_obj->name_list, &entry);
+ if (found == false) {
+ retval = -ENOENT;
+ goto error;
+ }
+
+ if (entry->len >= length) {
+ memcpy(buffer, entry->buf, length);
+ retval = length;
+ } else {
+ memcpy(buffer, entry->buf, entry->len);
+ retval = entry->len;
+ }
+
+ mutex_unlock(temp_obj->gate_handle);
+ return retval;
+
+error:
+ mutex_unlock(temp_obj->gate_handle);
+
+exit:
+ printk(KERN_ERR "nameserver_get_local entry not found!\n");
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_get_local);
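A hypothetical local add/get round trip using the handle created earlier (the entry name and returned error codes are illustrative assumptions):

        u32 out = 0;

        if (nameserver_add_uint32(ns_handle, "example-entry", 0x1234) == NULL)
                return -ENOMEM;
        /* nameserver_get_local() returns the number of bytes copied,
         * or a negative error such as -ENOENT if the entry is missing. */
        if (nameserver_get_local(ns_handle, "example-entry", &out, sizeof(u32)) < 0)
                return -ENOENT;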
+
+/*
+ * ======== nameserver_get ========
+ * Purpose:
+ * This will retrieve the value portion of a name/value
+ * pair, searching the local table and then remote processors
+ */
+int nameserver_get(void *handle, const char *name,
+ void *buffer, u32 length, u16 proc_id[])
+{
+ struct nameserver_object *temp_obj = NULL;
+ u16 max_proc_id;
+ u16 local_proc_id;
+ s32 retval = -ENOENT;
+ u32 i;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(name == NULL);
+ BUG_ON(buffer == NULL);
+ if (WARN_ON(length == 0)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ temp_obj = (struct nameserver_object *)handle;
+ max_proc_id = multiproc_get_max_processors();
+ local_proc_id = multiproc_get_id(NULL);
+ if (proc_id == NULL) {
+ retval = nameserver_get_local(temp_obj, name,
+ buffer, length);
+ if (retval > 0) /* Got the value */
+ goto exit;
+
+ for (i = 0; i < max_proc_id; i++) {
+ /* Skip current processor */
+ if (i == local_proc_id)
+ continue;
+
+ if (nameserver_state.remote_handle_list[i] == NULL)
+ continue;
+
+ retval = nameserver_remote_get(
+ nameserver_state.remote_handle_list[i],
+ temp_obj->name, name, buffer, length);
+ if (retval > 0 || ((retval < 0) &&
+ (retval != -ENOENT))) /* Got the value */
+ break;
+ }
+ goto exit;
+ }
+
+ for (i = 0; i < max_proc_id; i++) {
+ /* Skip processor with invalid id */
+ if (proc_id[i] == MULTIPROC_INVALIDID)
+ continue;
+
+ if (i == local_proc_id) {
+ retval = nameserver_get_local(temp_obj,
+ name, buffer, length);
+ if (retval > 0)
+ break;
+
+ } else {
+ retval = nameserver_remote_get(
+ nameserver_state.remote_handle_list[proc_id[i]],
+ temp_obj->name, name, buffer, length);
+ if (retval > 0 || ((retval < 0) &&
+ (retval != -ENOENT))) /* Got the value */
+ break;
+ }
+ }
+
+exit:
+ if (retval < 0)
+ printk(KERN_ERR "nameserver_get failed: status=%x \n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_get);
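A hypothetical remote-capable lookup; passing proc_id == NULL makes the call search the local table first and then every registered remote driver (the entry name is an assumption):

        u32 value = 0;
        int len;

        len = nameserver_get(ns_handle, "example-entry", &value, sizeof(u32), NULL);
        if (len <= 0)
                return -ENOENT;  /* not found locally or on any remote processor */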
+
+/*
+ * ======== nameserver_match ========
+ * Purpose:
+ * This will retrieve the value portion of a name/value
+ * pair from the local table by hash match
+ *
+ * Returns the number of characters that matched with an entry
+ * So if "abc" was an entry and you called match with "abcd", this
+ * function will find the "abc" entry. The return would be 3 since
+ * three characters matched
+ *
+ */
+int nameserver_match(void *handle, const char *name, u32 *value)
+{
+ struct nameserver_object *temp_obj = NULL;
+ struct nameserver_entry *node = NULL;
+ s32 retval = 0;
+ u32 hash;
+ bool found = false;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(name == NULL);
+ BUG_ON(value == NULL);
+
+ temp_obj = (struct nameserver_object *)handle;
+ retval = mutex_lock_interruptible(temp_obj->gate_handle);
+ if (retval)
+ goto exit;
+
+ hash = nameserver_string_hash(name);
+ list_for_each_entry(node, &temp_obj->name_list, elem) {
+ if (node->hash == hash) {
+ *value = *(u32 *)node->buf;
+ found = true;
+ }
+ }
+
+ if (found == false)
+ retval = -ENOENT;
+
+ mutex_unlock(temp_obj->gate_handle);
+
+exit:
+ if (retval < 0)
+ printk(KERN_ERR "nameserver_match failed status:%x \n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_match);
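A hypothetical match lookup; note that this kernel-side implementation compares the hash of the supplied name against stored entries and reports -ENOENT when no hash matches (the names used are assumptions):

        u32 value = 0;
        int status;

        status = nameserver_match(ns_handle, "abc", &value);
        if (status == -ENOENT)
                return status;  /* no entry with a matching hash */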
+
+/*
+ * ======== nameserver_register_remote_driver ========
+ * Purpose:
+ * This will register a remote driver for a processor
+ */
+int nameserver_register_remote_driver(void *handle, u16 proc_id)
+{
+ struct nameserver_remote_object *temp = NULL;
+ s32 retval = 0;
+ u16 proc_count;
+
+ BUG_ON(handle == NULL);
+ proc_count = multiproc_get_max_processors();
+ if (WARN_ON(proc_id >= proc_count)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ temp = (struct nameserver_remote_object *)handle;
+ nameserver_state.remote_handle_list[proc_id] = temp;
+ return 0;
+
+exit:
+ printk(KERN_ERR
+ "nameserver_register_remote_driver failed status:%x \n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_register_remote_driver);
+
+/*
+ * ======== nameserver_unregister_remote_driver ========
+ * Purpose:
+ * This will unregister a remote driver for a processor
+ */
+int nameserver_unregister_remote_driver(u16 proc_id)
+{
+ s32 retval = 0;
+ u16 proc_count;
+
+ proc_count = multiproc_get_max_processors();
+ if (WARN_ON(proc_id >= proc_count)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ nameserver_state.remote_handle_list[proc_id] = NULL;
+ return 0;
+
+exit:
+ printk(KERN_ERR
+ "nameserver_unregister_remote_driver failed status:%x \n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_unregister_remote_driver);
+
diff --git a/drivers/dsp/syslink/multicore_ipc/nameserver_ioctl.c b/drivers/dsp/syslink/multicore_ipc/nameserver_ioctl.c
new file mode 100755
index 000000000000..1564fb61fd26
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/nameserver_ioctl.c
@@ -0,0 +1,597 @@
+/*
+* nameserver_ioctl.c
+*
+* This provides the ioctl interface for the nameserver module
+*
+* Copyright (C) 2008-2009 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE.
+*/
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <nameserver.h>
+#include <nameserver_ioctl.h>
+
+/*
+ * FUNCTIONS NEED TO BE REVIEWED AND OPTIMIZED!
+ */
+
+/*
+ * ======== nameserver_ioctl_setup ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * setup nameserver module
+ */
+static int nameserver_ioctl_setup(
+ struct nameserver_cmd_args *cargs)
+{
+ cargs->api_status = nameserver_setup();
+ return 0;
+}
+
+/*
+ * ======== nameserver_ioctl_destroy ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * destroy nameserver module
+ */
+static int nameserver_ioctl_destroy(
+ struct nameserver_cmd_args *cargs)
+{
+ cargs->api_status = nameserver_destroy();
+ return 0;
+}
+
+/*
+ * ======== nameserver_ioctl_params_init ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * get the default configuration of a nameserver instance
+ */
+static int nameserver_ioctl_params_init(struct nameserver_cmd_args *cargs)
+{
+ struct nameserver_params params;
+ s32 status = 0;
+ ulong size;
+
+ cargs->api_status = nameserver_params_init(&params);
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct nameserver_params));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== nameserver_ioctl_get_handle ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * get the handle of a nameserver instance from name
+ */
+static int nameserver_ioctl_get_handle(struct nameserver_cmd_args *cargs)
+{
+ void *handle = NULL;
+ char *name = NULL;
+ s32 status = 0;
+ ulong size;
+
+ name = kmalloc(cargs->args.get_handle.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ name[cargs->args.get_handle.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.get_handle.name,
+ cargs->args.get_handle.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ handle = nameserver_get_handle(name);
+ cargs->args.get_handle.handle = handle;
+ cargs->api_status = 0;
+
+name_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== nameserver_ioctl_create ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * create a name server instance
+ */
+static int nameserver_ioctl_create(struct nameserver_cmd_args *cargs)
+{
+ struct nameserver_params params;
+ void *handle = NULL;
+ char *name = NULL;
+ s32 status = 0;
+ ulong size;
+
+ name = kmalloc(cargs->args.create.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ name[cargs->args.create.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.create.name,
+ cargs->args.create.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto copy_from_usr_error;
+ }
+
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct nameserver_params));
+ if (size) {
+ status = -EFAULT;
+ goto copy_from_usr_error;
+ }
+
+ handle = nameserver_create(name, &params);
+ cargs->args.create.handle = handle;
+ cargs->api_status = 0;
+
+copy_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_ioctl_delete ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * delete a name server instance
+ */
+static int nameserver_ioctl_delete(struct nameserver_cmd_args *cargs)
+{
+ cargs->api_status =
+ nameserver_delete(&cargs->args.delete_instance.handle);
+ return 0;
+}
+
+/*
+ * ======== nameserver_ioctl_add ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * add an entry into a nameserver instance
+ */
+static int nameserver_ioctl_add(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ char *buf = NULL;
+ void *entry;
+ s32 status = 0;
+ ulong size;
+
+ name = kmalloc(cargs->args.add.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ name[cargs->args.add.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.add.name,
+ cargs->args.add.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ buf = kmalloc(cargs->args.add.len, GFP_KERNEL);
+ if (buf == NULL) {
+ status = -ENOMEM;
+ goto buf_alloc_error;
+ }
+
+ size = copy_from_user(buf, cargs->args.add.buf,
+ cargs->args.add.len);
+ if (size) {
+ status = -EFAULT;
+ goto buf_from_usr_error;
+ }
+
+ entry = nameserver_add(cargs->args.add.handle, name, buf,
+ cargs->args.add.len);
+ cargs->args.add.entry = entry;
+ cargs->args.add.node = entry;
+ cargs->api_status = 0;
+
+buf_from_usr_error:
+ kfree(buf);
+
+buf_alloc_error: /* Fall through */
+name_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_ioctl_add_uint32 ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * add a Uint32 entry into a nameserver instance
+ */
+static int nameserver_ioctl_add_uint32(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ void *entry;
+ s32 status = 0;
+ ulong size;
+
+ name = kmalloc(cargs->args.addu32.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ name[cargs->args.addu32.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.addu32.name,
+ cargs->args.addu32.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ entry = nameserver_add_uint32(cargs->args.addu32.handle, name,
+ cargs->args.addu32.value);
+ cargs->args.addu32.entry = entry;
+ cargs->api_status = 0;
+
+name_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_ioctl_match ========
+ * Purpose:
+ * This wrapper function will call the nameserver function
+ * to retrieve the value portion of a name/value
+ * pair from local table
+ */
+static int nameserver_ioctl_match(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ u32 buf;
+ s32 status = 0;
+ ulong size;
+
+ name = kmalloc(cargs->args.match.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ name[cargs->args.match.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.match.name,
+ cargs->args.match.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ cargs->api_status =
+ nameserver_match(cargs->args.match.handle, name, &buf);
+ size = copy_to_user(cargs->args.match.value, &buf, sizeof(u32));
+ if (size) {
+ status = -EFAULT;
+ goto buf_to_usr_error;
+ }
+
+buf_to_usr_error: /* Fall through */
+name_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== nameserver_ioctl_remove ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * remove a name/value pair from a name server
+ */
+static int nameserver_ioctl_remove(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ s32 status = 0;
+ ulong size;
+
+ name = kmalloc(cargs->args.remove.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ name[cargs->args.remove.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.remove.name,
+ cargs->args.remove.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ cargs->api_status =
+ nameserver_remove(cargs->args.remove.handle, name);
+
+name_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_ioctl_remove_entry ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * remove an entry from a name server
+ */
+static int nameserver_ioctl_remove_entry(struct nameserver_cmd_args *cargs)
+{
+ cargs->api_status = nameserver_remove_entry(
+ cargs->args.remove_entry.handle,
+ cargs->args.remove_entry.entry);
+ return 0;
+}
+
+/*
+ * ======== nameserver_ioctl_get_local ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * retrieve the value portion of a name/value pair from local table
+ */
+static int nameserver_ioctl_get_local(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ char *buf = NULL;
+ s32 status = 0;
+ ulong size;
+
+ name = kmalloc(cargs->args.get_local.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ name[cargs->args.get_local.name_len] = '\0';
+ buf = kmalloc(cargs->args.get_local.len, GFP_KERNEL);
+ if (buf == NULL) {
+ status = -ENOMEM;
+ goto buf_alloc_error;
+ }
+
+ size = copy_from_user(name, cargs->args.get_local.name,
+ cargs->args.get_local.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ cargs->api_status = nameserver_get_local(
+ cargs->args.get_local.handle, name,
+ buf, cargs->args.get_local.len);
+ size = copy_to_user(cargs->args.get_local.buf, buf,
+ cargs->args.get_local.len);
+ if (size)
+ status = -EFAULT;
+
+name_from_usr_error:
+ kfree(buf);
+
+buf_alloc_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_ioctl_get ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * retrieve the value portion of a name/value pair from table
+ */
+static int nameserver_ioctl_get(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ char *buf = NULL;
+ u16 *proc_id = NULL;
+ s32 status = 0;
+ ulong size;
+
+ name = kmalloc(cargs->args.get.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ name[cargs->args.get.name_len] = '\0';
+ buf = kmalloc(cargs->args.get.len, GFP_KERNEL);
+ if (buf == NULL) {
+ status = -ENOMEM;
+ goto buf_alloc_error;
+ }
+
+ proc_id = kmalloc(cargs->args.get.proc_len, GFP_KERNEL);
+ if (proc_id == NULL) {
+ status = -ENOMEM;
+ goto proc_alloc_error;
+ }
+
+ size = copy_from_user(name, cargs->args.get.name,
+ cargs->args.get.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ size = copy_from_user(proc_id, cargs->args.get.proc_id,
+ cargs->args.get.proc_len);
+ if (size) {
+ status = -EFAULT;
+ goto proc_from_usr_error;
+ }
+
+ cargs->api_status = nameserver_get(cargs->args.get.handle, name, buf,
+ cargs->args.get.len, proc_id);
+ size = copy_to_user(cargs->args.get.buf, buf,
+ cargs->args.get.len);
+ if (size)
+ status = -EFAULT;
+
+
+proc_from_usr_error: /* Fall through */
+name_from_usr_error:
+ kfree(proc_id);
+
+proc_alloc_error:
+ kfree(buf);
+
+buf_alloc_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== nameserver_ioctl ========
+ * Purpose:
+ * This is the ioctl interface for the nameserver module
+ */
+int nameserver_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct nameserver_cmd_args __user *uarg =
+ (struct nameserver_cmd_args __user *)args;
+ struct nameserver_cmd_args cargs;
+
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct nameserver_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_NAMESERVER_ADD:
+ status = nameserver_ioctl_add(&cargs);
+ break;
+
+ case CMD_NAMESERVER_ADDUINT32:
+ status = nameserver_ioctl_add_uint32(&cargs);
+ break;
+
+ case CMD_NAMESERVER_GET:
+ status = nameserver_ioctl_get(&cargs);
+ break;
+
+ case CMD_NAMESERVER_GETLOCAL:
+ status = nameserver_ioctl_get_local(&cargs);
+ break;
+
+ case CMD_NAMESERVER_MATCH:
+ status = nameserver_ioctl_match(&cargs);
+ break;
+
+ case CMD_NAMESERVER_REMOVE:
+ status = nameserver_ioctl_remove(&cargs);
+ break;
+
+ case CMD_NAMESERVER_REMOVEENTRY:
+ status = nameserver_ioctl_remove_entry(&cargs);
+ break;
+
+ case CMD_NAMESERVER_PARAMS_INIT:
+ status = nameserver_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_NAMESERVER_CREATE:
+ status = nameserver_ioctl_create(&cargs);
+ break;
+
+ case CMD_NAMESERVER_DELETE:
+ status = nameserver_ioctl_delete(&cargs);
+ break;
+
+ case CMD_NAMESERVER_GETHANDLE:
+ status = nameserver_ioctl_get_handle(&cargs);
+ break;
+
+ case CMD_NAMESERVER_SETUP:
+ status = nameserver_ioctl_setup(&cargs);
+ break;
+
+ case CMD_NAMESERVER_DESTROY:
+ status = nameserver_ioctl_destroy(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ status = -ERESTARTSYS;
+
+ if (status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct nameserver_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+}
+
diff --git a/drivers/dsp/syslink/multicore_ipc/nameserver_remote.c b/drivers/dsp/syslink/multicore_ipc/nameserver_remote.c
new file mode 100644
index 000000000000..43227996266a
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/nameserver_remote.c
@@ -0,0 +1,49 @@
+/*
+ * nameserver_remote.c
+ *
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include <nameserver_remote.h>
+
+/*
+ * ======== nameserver_remote_get ========
+ * Purpose:
+ * This will get data from remote name server
+ */
+int nameserver_remote_get(const struct nameserver_remote_object *handle,
+ const char *instance_name, const char *name,
+ void *value, u32 value_len)
+{
+ s32 retval = 0;
+
+ if (handle == NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (WARN_ON((instance_name == NULL) || (name == NULL)
+ || (value == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = handle->get(handle, instance_name,
+ name, value, value_len, NULL);
+
+exit:
+ return retval;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/nameserver_remotenotify.c b/drivers/dsp/syslink/multicore_ipc/nameserver_remotenotify.c
new file mode 100755
index 000000000000..68cdec953b5b
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/nameserver_remotenotify.c
@@ -0,0 +1,721 @@
+/*
+ * nameserver_remotenotify.c
+ *
+ * The nameserver_remotenotify module provides functionality to get a
+ * name/value pair from a remote nameserver.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Utilities headers */
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+
+/* Syslink headers */
+#include <syslink/atomic_linux.h>
+
+/* Module level headers */
+#include <gate_remote.h>
+#include <gatepeterson.h>
+#include <nameserver.h>
+#include <multiproc.h>
+#include <nameserver_remotenotify.h>
+#include <notify.h>
+
+
+/*
+ * Macro to make a correct module magic number with refCount
+ */
+#define NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(x) \
+ ((NAMESERVERREMOTENOTIFY_MODULEID << 12u) | (x))
+
+/*
+ * Cache line length
+ * TODO: Short-term hack. Make parameter or figure out some other way!
+ */
+#define NAMESERVERREMOTENOTIFY_CACHESIZE 128
+
+/*
+ * Maximum length of value buffer that can be stored in the NameServer
+ * managed by this NameServerRemoteNotify instance.
+ */
+#define NAMESERVERREMOTENOTIFY_MAXVALUEBUFLEN 300
+
+/* Defines the nameserver_remotenotify state object, which contains all the
+ * module specific information
+ */
+struct nameserver_remotenotify_module_object {
+ struct nameserver_remotenotify_config cfg;
+ struct nameserver_remotenotify_config def_cfg;
+ struct nameserver_remotenotify_params def_inst_params;
+ bool is_setup;
+ void *gate_handle;
+ atomic_t ref_count;
+};
+
+/*
+ * NameServer remote transport state attributes
+ */
+struct nameserver_remotenotify_attrs {
+ u32 version;
+ u32 status;
+ u32 shared_addr_size;
+};
+
+/*
+ * NameServer remote transport packet definition
+ */
+struct nameserver_remotenotify_message {
+ u32 request;
+ u32 response;
+ u32 request_status;
+ u32 value;
+ u32 value_len;
+ char instance_name[32];
+ char name[32];
+ char value_buf[NAMESERVERREMOTENOTIFY_MAXVALUEBUFLEN];
+};
+
+/*
+ * NameServer remote transport state object definition
+ */
+struct nameserver_remotenotify_obj {
+ struct nameserver_remotenotify_attrs *attrs;
+ struct nameserver_remotenotify_message *msg[2];
+ struct nameserver_remotenotify_params params;
+ u16 remote_proc_id;
+ struct mutex *local_gate;
+ struct semaphore *sem_handle; /* Binary semaphore */
+};
+
+/*
+ * NameServer remote transport state object definition
+ */
+struct nameserver_remotenotify_object {
+ int (*get)(void *,
+ const char *instance_name, const char *name,
+ void *value, u32 value_len, void *reserved);
+ void *obj; /* Implementation specific object */
+};
+
+/*
+ * nameserver_remotenotify state object variable
+ */
+static struct nameserver_remotenotify_module_object
+ nameserver_remotenotify_state = {
+ .is_setup = false,
+ .gate_handle = NULL,
+ .def_cfg.reserved = 0,
+ .def_inst_params.gate = NULL,
+ .def_inst_params.shared_addr = 0x0,
+ .def_inst_params.shared_addr_size = 0x0,
+ .def_inst_params.notify_event_no = (u32) -1,
+ .def_inst_params.notify_driver = NULL,
+};
+
+/*
+ * ======== nameserver_remotenotify_get_config ========
+ * Purpose:
+ * This will get the default configuration for the nameserver remote
+ * module.
+ * This function can be called by the application to have its
+ * configuration parameters for nameserver_remotenotify_setup filled
+ * in by the nameserver_remotenotify module with the default
+ * parameters. If the user does not wish to make any change to the
+ * default parameters, this API is not required to be called
+ */
+void nameserver_remotenotify_get_config(
+ struct nameserver_remotenotify_config *cfg)
+{
+ BUG_ON(cfg == NULL);
+ if (nameserver_remotenotify_state.is_setup == false)
+ memcpy(cfg, &(nameserver_remotenotify_state.def_cfg),
+ sizeof(struct nameserver_remotenotify_config));
+ else
+ memcpy(cfg, &(nameserver_remotenotify_state.cfg),
+ sizeof(struct nameserver_remotenotify_config));
+}
+
+
+/*
+ * ======== nameserver_remotenotify_setup ========
+ * Purpose:
+ * This will set up the nameserver_remotenotify module.
+ * This function must be called before any other instance-level APIs
+ * can be invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then nameserver_remotenotify_get_config can be called
+ * to get the configuration filled with the default values. After
+ * this, only the required configuration values can be changed. If
+ * the user does not wish to make any change in the default
+ * parameters, the application can simply call
+ * nameserver_remotenotify_setup with NULL parameters. The default
+ * parameters would get automatically used
+ */
+int nameserver_remotenotify_setup(
+ struct nameserver_remotenotify_config *cfg)
+{
+ struct nameserver_remotenotify_config tmp_cfg;
+ s32 retval = 0;
+ struct mutex *lock = NULL;
+ bool user_cfg = true;
+
+ /* This sets the ref_count if it is not already initialized; the upper
+ * 16 bits are written with the module ID to ensure correctness of the
+ * ref_count variable.
+ */
+ atomic_cmpmask_and_set(&nameserver_remotenotify_state.ref_count,
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&nameserver_remotenotify_state.ref_count)
+ != NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ nameserver_remotenotify_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ user_cfg = false;
+ }
+
+ /* Create a default gate handle for local module protection */
+ lock = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (lock == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ mutex_init(lock);
+ nameserver_remotenotify_state.gate_handle = lock;
+ if (user_cfg)
+ memcpy(&nameserver_remotenotify_state.cfg, cfg,
+ sizeof(struct nameserver_remotenotify_config));
+
+ nameserver_remotenotify_state.is_setup = true;
+
+exit:
+ return retval;
+}
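A hypothetical bring-up sequence using the two calls above; the configuration is taken as-is from the defaults, which is the documented no-change path:

        struct nameserver_remotenotify_config config;

        nameserver_remotenotify_get_config(&config);
        /* selected fields could be modified here before setup */
        if (nameserver_remotenotify_setup(&config) < 0)
                return -ENODEV;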
+
+/*
+ * ======== nameserver_remotenotify_destroy ========
+ * Purpose:
+ * This will destroy the nameserver_remotenotify module.
+ * Once this function is called, other nameserver_remotenotify
+ * module APIs, except for the nameserver_remotenotify_get_config
+ * API cannot be called anymore.
+ */
+int nameserver_remotenotify_destroy(void)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&nameserver_remotenotify_state.ref_count)
+ == NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0))) {
+ retval = 1;
+ goto exit;
+ }
+
+ if (nameserver_remotenotify_state.gate_handle != NULL)
+ kfree(nameserver_remotenotify_state.gate_handle);
+
+ nameserver_remotenotify_state.is_setup = false;
+ return 0;
+
+exit:
+ return retval;
+}
+
+/*
+ * ======== nameserver_remotenotify_params_init ========
+ * Purpose:
+ * This will get the instance parameters (or the defaults if no
+ * handle is given)
+ */
+void nameserver_remotenotify_params_init(void *handle,
+ struct nameserver_remotenotify_params *params)
+{
+ struct nameserver_remotenotify_object *object = NULL;
+ struct nameserver_remotenotify_obj *obj = NULL;
+
+ if (atomic_cmpmask_and_lt(&(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ printk(KERN_ERR "nameserver_remotenotify_params_init failed: "
+ "Module is not initialized!\n");
+ return;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ printk(KERN_ERR "nameserver_remotenotify_params_init failed: "
+ "Argument of type(nameserver_remotenotify_params *) "
+ "is NULL!\n");
+ return;
+ }
+
+ object = (struct nameserver_remotenotify_object *)handle;
+ if (handle == NULL)
+ memcpy(params,
+ &(nameserver_remotenotify_state.def_inst_params),
+ sizeof(struct nameserver_remotenotify_params));
+ else {
+ obj = (struct nameserver_remotenotify_obj *)object->obj;
+ /* Return updated nameserver_remotenotify instance specific
+ * parameters.
+ */
+ memcpy(params, &(obj->params),
+ sizeof(struct nameserver_remotenotify_params));
+ }
+}
+
+
+/*
+ * ======== nameserver_remotenotify_callback ========
+ * Purpose:
+ * This will be called when a notify event is received
+ */
+void nameserver_remotenotify_callback(u16 proc_id, u32 event_no,
+ void *arg, u32 payload)
+{
+ struct nameserver_remotenotify_obj *handle = NULL;
+ u32 proc_count;
+ u16 offset = 0;
+ void *nshandle = NULL;
+ u32 value_len;
+ u32 key;
+ s32 retval = 0;
+ s32 count = -ENOENT;
+
+ if (atomic_cmpmask_and_lt(&(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(arg == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ proc_count = multiproc_get_max_processors();
+ if (WARN_ON(proc_id >= proc_count)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct nameserver_remotenotify_obj *)arg;
+ if ((multiproc_get_id(NULL) > proc_id))
+ offset = 1;
+
+ if (handle->msg[1 - offset]->request != true)
+ goto signal_response;
+
+ /* This is a request */
+ value_len = handle->msg[1 - offset]->value_len;
+ nshandle = nameserver_get_handle(
+ handle->msg[1 - offset]->instance_name);
+ if (nshandle != NULL) {
+ /* Search for the NameServer entry */
+ if (value_len == sizeof(u32)) {
+ count = nameserver_get_local(nshandle,
+ handle->msg[1 - offset]->name,
+ &handle->msg[1 - offset]->value,
+ value_len);
+ } else {
+ count = nameserver_get_local(nshandle,
+ handle->msg[1 - offset]->name,
+ &handle->msg[1 - offset]->value_buf,
+ value_len);
+ }
+ }
+
+ key = gatepeterson_enter(handle->params.gate);
+ /* If count != -ENOENT, an entry was found */
+ if (count != -ENOENT)
+ handle->msg[1 - offset]->request_status = true;
+
+ /* Send a response back */
+ handle->msg[1 - offset]->response = true;
+ handle->msg[1 - offset]->request = false;
+ /* now we can leave the gate */
+ gatepeterson_leave(handle->params.gate, key);
+
+ /*
+ * The NotifyDriver handle must exist at this point,
+ * otherwise notify_sendevent would have failed
+ */
+ retval = notify_sendevent(handle->params.notify_driver,
+ proc_id, event_no, 0, true);
+
+signal_response:
+ if (handle->msg[offset]->response == true)
+ up(handle->sem_handle);
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "nameserver_remotenotify_callback failed! "
+ "status = 0x%x\n", retval);
+ }
+ return;
+}
+
+/*
+ * ======== nameserver_remotenotify_get ========
+ * Purpose:
+ * This will get a remote name value pair
+ */
+int nameserver_remotenotify_get(void *rhandle,
+ const char *instance_name, const char *name,
+ void *value, u32 value_len, void *reserved)
+{
+ struct nameserver_remotenotify_object *handle = NULL;
+ struct nameserver_remotenotify_obj *obj = NULL;
+ s32 offset = 0;
+ s32 len;
+ u32 key;
+ s32 retval = 0;
+
+ BUG_ON(instance_name == NULL);
+ BUG_ON(name == NULL);
+ BUG_ON(value == NULL);
+ BUG_ON((value_len <= 0) || \
+ (value_len > NAMESERVERREMOTENOTIFY_MAXVALUEBUFLEN));
+
+ if (atomic_cmpmask_and_lt(&(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(rhandle == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if ((value_len == 0) || \
+ (value_len > NAMESERVERREMOTENOTIFY_MAXVALUEBUFLEN)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct nameserver_remotenotify_object *)rhandle;
+ obj = (struct nameserver_remotenotify_obj *)handle->obj;
+ if (multiproc_get_id(NULL) > obj->remote_proc_id)
+ offset = 1;
+
+ key = gatepeterson_enter(obj->params.gate);
+ /* This is a request message */
+ obj->msg[offset]->request = 1;
+ obj->msg[offset]->response = 0;
+ obj->msg[offset]->request_status = 0;
+ obj->msg[offset]->value_len = value_len;
+ len = strlen(instance_name) + 1; /* Take termination null char */
+ if (len >= 32) {
+ retval = -EINVAL;
+ goto inval_len_error;
+ }
+ strncpy(obj->msg[offset]->instance_name, instance_name, len);
+ len = strlen(name) + 1; /* Take terminating null char */
+ if (len >= 32) {
+ retval = -EINVAL;
+ goto inval_len_error;
+ }
+ strncpy(obj->msg[offset]->name, name, len);
+
+ /* Send the notification to remote processor */
+ retval = notify_sendevent(obj->params.notify_driver,
+ obj->remote_proc_id,
+ obj->params.notify_event_no,
+ 0, /* Payload */
+ false); /* Not sending a payload */
+ if (retval < 0) {
+ /* Undo previous operations */
+ obj->msg[offset]->request = 0;
+ obj->msg[offset]->value_len = 0;
+ goto notify_error;
+ }
+
+ gatepeterson_leave(obj->params.gate, key);
+
+ /* Pend on the semaphore */
+ retval = down_interruptible(obj->sem_handle);
+ if (retval) {
+ goto exit;
+ }
+
+ key = gatepeterson_enter(obj->params.gate);
+ if (obj->msg[offset]->request_status != true) {
+ retval = -ENOENT;
+ goto request_error;
+ }
+
+ if (!value_len) {
+ retval = -ENOENT;
+ goto request_error;
+ }
+
+ if (value_len == sizeof(u32))
+ memcpy((void *)value, (void *) &(obj->msg[offset]->value),
+ sizeof(u32));
+ else
+ memcpy((void *)value, (void *)&(obj->msg[offset]->value_buf),
+ value_len);
+
+ obj->msg[offset]->request_status = false;
+ obj->msg[offset]->request = 0;
+ obj->msg[offset]->response = 0;
+ retval = value_len;
+
+inval_len_error:
+notify_error:
+request_error:
+ gatepeterson_leave(obj->params.gate, key);
+exit:
+ return retval;
+}
+
+/*
+ * ======== nameServer_remote_notify_params_init ========
+ * Purpose:
+ * This will initialize the params structure with default values
+ */
+int nameServer_remote_notify_params_init(
+ struct nameserver_remotenotify_params *params)
+{
+ BUG_ON(params == NULL);
+
+ params->notify_event_no = 0;
+ params->notify_driver = NULL;
+ params->shared_addr = NULL;
+ params->shared_addr_size = 0;
+ params->gate = NULL;
+ return 0;
+}
+
+/*
+ * ======== nameserver_remotenotify_create ========
+ * Purpose:
+ * This will setup the nameserver remote module
+ */
+void *nameserver_remotenotify_create(u16 proc_id,
+ const struct nameserver_remotenotify_params *params)
+{
+ struct nameserver_remotenotify_object *handle = NULL;
+ struct nameserver_remotenotify_obj *obj = NULL;
+ u16 proc_count;
+ s32 retval = 0;
+ s32 retval1 = 0;
+ u32 offset = 0;
+
+ if (atomic_cmpmask_and_lt(&(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (WARN_ON(params->notify_driver == NULL ||
+ params->shared_addr == NULL ||
+ params->shared_addr_size == 0)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ proc_count = multiproc_get_max_processors();
+ if (proc_id >= proc_count) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = kmalloc(sizeof(struct nameserver_remotenotify_obj), GFP_KERNEL);
+ handle = kmalloc(sizeof(struct nameserver_remotenotify_object),
+ GFP_KERNEL);
+ if (obj == NULL || handle == NULL) {
+ retval = -ENOMEM;
+ goto mem_error;
+ }
+
+ handle->get = nameserver_remotenotify_get;
+ handle->obj = (void *)obj;
+ obj->local_gate = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (obj->local_gate == NULL) {
+ retval = -ENOMEM;
+ goto mem_error;
+ }
+
+ obj->remote_proc_id = proc_id;
+ if (multiproc_get_id(NULL) > proc_id)
+ offset = 1;
+
+ obj->attrs = (struct nameserver_remotenotify_attrs *)
+ params->shared_addr;
+ obj->msg[0] = (struct nameserver_remotenotify_message *)
+ ((u32)obj->attrs +
+ NAMESERVERREMOTENOTIFY_CACHESIZE);
+ obj->msg[1] = (struct nameserver_remotenotify_message *)
+ ((u32)obj->msg[0] +
+ sizeof(struct
+ nameserver_remotenotify_message));
+ /* Clear out self shared structures */
+ memset(obj->msg[offset], 0,
+ sizeof(struct nameserver_remotenotify_message));
+ memcpy(&obj->params, params,
+ sizeof(struct nameserver_remotenotify_params));
+ retval = notify_register_event(params->notify_driver, proc_id,
+ params->notify_event_no,
+ nameserver_remotenotify_callback,
+ (void *)obj);
+ if (retval < 0)
+ goto notify_error;
+
+ retval = nameserver_register_remote_driver((void *)handle, proc_id);
+ obj->sem_handle = kmalloc(sizeof(struct semaphore), GFP_KERNEL);
+ if (obj->sem_handle == NULL) {
+ retval = -ENOMEM;
+ goto sem_alloc_error;
+ }
+
+ sema_init(obj->sem_handle, 0);
+ /* Initialized last since its initial state is unlocked */
+ mutex_init(obj->local_gate);
+ return (void *)handle;
+
+sem_alloc_error:
+ nameserver_unregister_remote_driver(proc_id);
+ /* Do we want to check the status? */
+ retval1 = notify_unregister_event(obj->params.notify_driver,
+ obj->remote_proc_id,
+ obj->params.notify_event_no,
+ nameserver_remotenotify_callback,
+ (void *)obj);
+
+notify_error:
+ kfree(obj->local_gate);
+
+mem_error:
+ kfree(obj);
+ kfree(handle);
+
+exit:
+ printk(KERN_ERR "nameserver_remotenotify_create failed! "
+ "status = 0x%x\n", retval);
+ return NULL;
+}
+
+
+/*
+ * ======== nameserver_remotenotify_delete ========
+ * Purpose:
+ * This will delete the nameserver remote transport instance
+ */
+int nameserver_remotenotify_delete(void **rhandle)
+{
+ struct nameserver_remotenotify_object *handle = NULL;
+ struct nameserver_remotenotify_obj *obj = NULL;
+ s32 retval = 0;
+ struct mutex *gate = NULL;
+
+ if (atomic_cmpmask_and_lt(&(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON((rhandle == NULL) || (*rhandle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct nameserver_remotenotify_object *)(*rhandle);
+ obj = (struct nameserver_remotenotify_obj *)handle->obj;
+ if (obj == NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(obj->local_gate);
+ if (retval)
+ goto exit;
+
+ retval = nameserver_unregister_remote_driver(obj->remote_proc_id);
+ /* Do we have to bug_on/warn_on here instead of exit? */
+ if (retval < 0)
+ goto exit;
+
+ kfree(obj->sem_handle);
+ obj->sem_handle = NULL;
+ /* Unregister the event from Notify */
+ retval = notify_unregister_event(obj->params.notify_driver,
+ obj->remote_proc_id,
+ obj->params.notify_event_no,
+ nameserver_remotenotify_callback,
+ (void *)obj);
+ if (retval == NOTIFY_SUCCESS)
+ retval = 0;
+ gate = obj->local_gate;
+ kfree(obj);
+ kfree(handle);
+ *rhandle = NULL;
+ mutex_unlock(gate);
+ kfree(gate);
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "nameserver_remotenotify_delete failed! "
+ "status = 0x%x\n", retval);
+ }
+ return retval;
+}
+
+
+/*
+ * ======== nameserver_remotenotify_shared_memreq ========
+ * Purpose:
+ * This will give shared memory requirements for the
+ * nameserver remote transport instance
+ */
+u32 nameserver_remotenotify_shared_memreq(const
+ struct nameserver_remotenotify_params *params)
+{
+ u32 total_size;
+ /* params is not used- to remove warning. */
+ (void)params;
+
+ BUG_ON(params == NULL);
+ /*
+ * The attrs take one cache line; in addition, 2 message structs are
+ * required: one for sending the request and one for sending the response.
+ */
+ total_size = NAMESERVERREMOTENOTIFY_CACHESIZE +
+ (2 * sizeof(struct nameserver_remotenotify_message));
+ return total_size;
+}
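A hypothetical sizing sequence for one transport instance; obtaining and mapping the actual shared memory is outside this sketch:

        struct nameserver_remotenotify_params params;
        u32 memreq;

        nameServer_remote_notify_params_init(&params);
        memreq = nameserver_remotenotify_shared_memreq(&params);
        params.shared_addr_size = memreq;
        /* params.shared_addr must point to 'memreq' bytes of shared memory
         * before nameserver_remotenotify_create() is called. */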
+
diff --git a/drivers/dsp/syslink/multicore_ipc/nameserver_remotenotify_ioctl.c b/drivers/dsp/syslink/multicore_ipc/nameserver_remotenotify_ioctl.c
new file mode 100755
index 000000000000..934b47c068f9
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/nameserver_remotenotify_ioctl.c
@@ -0,0 +1,346 @@
+/*
+ * nameserver_remotenotify_ioctl.c
+ *
+ * The nameserver_remotenotify module provides functionality to get a
+ * name/value pair from a remote nameserver.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <sharedregion.h>
+#include <nameserver_remotenotify_ioctl.h>
+
+/*
+ * ======== nameserver_remotenotify_ioctl_get ======
+ * Purpose:
+ * This is the ioctl interface to the nameserver_remotenotify_get function
+ */
+static int nameserver_remotenotify_ioctl_get(
+ struct nameserver_remotenotify_cmd_args *cargs)
+{
+ s32 status = 0;
+ ulong size;
+ char *instance_name = NULL;
+ char *name = NULL;
+ u8 *value = NULL;
+
+ BUG_ON(cargs->args.get.instance_name_len == 0);
+ if (cargs->args.get.instance_name_len) {
+ instance_name = kmalloc(cargs->args.get.instance_name_len + 1,
+ GFP_KERNEL);
+ if (instance_name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ instance_name[cargs->args.get.instance_name_len] = '\0';
+ size = copy_from_user(instance_name,
+ cargs->args.get.instance_name,
+ cargs->args.get.instance_name_len);
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+ }
+
+ if (cargs->args.get.name_len) {
+ name = kmalloc(cargs->args.get.name_len + 1,
+ GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ name[cargs->args.get.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.get.name,
+ cargs->args.get.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+ }
+
+ /* Allocate memory for the value */
+ if (cargs->args.get.value_len > 0) {
+ value = kmalloc(cargs->args.get.value_len, GFP_KERNEL);
+ if (value == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ size = copy_from_user(value, cargs->args.get.value,
+ cargs->args.get.value_len);
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+ }
+
+ cargs->args.get.len = nameserver_remotenotify_get(
+ cargs->args.get.handle,
+ instance_name,
+ name,
+ value,
+ cargs->args.get.value_len,
+ cargs->args.get.reserved);
+ cargs->api_status = 0;
+
+exit:
+ kfree(value);
+ kfree(name);
+ kfree(instance_name);
+ return status;
+}
+
+/*
+ * ======== nameserver_remotenotify_ioctl_shared_memreq ======
+ * Purpose:
+ * This is the ioctl interface to the nameserver_remotenotify_shared_memreq function
+ */
+static int nameserver_remotenotify_ioctl_shared_memreq(
+ struct nameserver_remotenotify_cmd_args *cargs)
+{
+ struct nameserver_remotenotify_params params;
+ s32 status = 0;
+ ulong size;
+
+ /* params may be NULL. */
+ if (cargs->args.shared_memreq.params != NULL) {
+ size = copy_from_user(&params,
+ cargs->args.shared_memreq.params,
+ sizeof(struct nameserver_remotenotify_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+ }
+
+ cargs->args.shared_memreq.shared_mem_size =
+ nameserver_remotenotify_shared_memreq(&params);
+ cargs->api_status = 0;
+
+exit:
+ return status;
+}
+
+/*
+ * ======== nameserver_remotenotify_ioctl_params_init ========
+ * Purpose:
+ * This is the ioctl interface to the nameserver_remotenotify_params_init function
+ */
+static int nameserver_remotenotify_ioctl_params_init(
+ struct nameserver_remotenotify_cmd_args *cargs)
+{
+ struct nameserver_remotenotify_params params;
+ s32 status = 0;
+ ulong size;
+
+ nameserver_remotenotify_params_init(cargs->args.params_init.handle,
+ &params);
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct nameserver_remotenotify_params));
+ if (size)
+ status = -EFAULT;
+
+ cargs->api_status = 0;
+ return status;
+}
+
+/*
+ * ======== nameserver_remotenotify_ioctl_create========
+ * Purpose:
+ * This is the ioctl interface to the nameserver_remotenotify_create function
+ */
+static int nameserver_remotenotify_ioctl_create(
+ struct nameserver_remotenotify_cmd_args *cargs)
+{
+ struct nameserver_remotenotify_params params;
+ s32 status = 0;
+ ulong size;
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct nameserver_remotenotify_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ params.shared_addr = sharedregion_get_ptr((u32 *)
+ cargs->args.create.params->shared_addr);
+ cargs->args.create.handle = nameserver_remotenotify_create(
+ cargs->args.create.proc_id,
+ &params);
+ cargs->api_status = 0;
+exit:
+ return status;
+}
+
+/*
+ * ======== nameserver_remotenotify_ioctl_delete ========
+ * Purpose:
+ * This is the ioctl interface to the nameserver_remotenotify_delete function
+ */
+static int nameserver_remotenotify_ioctl_delete(
+ struct nameserver_remotenotify_cmd_args *cargs)
+{
+ cargs->api_status = nameserver_remotenotify_delete(
+ &cargs->args.delete_instance.handle);
+ return 0;
+}
+
+/*
+ * ======== nameserver_remotenotify_ioctl_get_config ========
+ * Purpose:
+ * This is the ioctl interface to the nameserver_remotenotify_get_config function
+ */
+static int nameserver_remotenotify_ioctl_get_config(
+ struct nameserver_remotenotify_cmd_args *cargs)
+{
+ s32 status = 0;
+ ulong size;
+ struct nameserver_remotenotify_config config;
+
+ nameserver_remotenotify_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct nameserver_remotenotify_config));
+ if (size)
+ status = -EFAULT;
+
+ cargs->api_status = 0;
+ return status;
+}
+
+/*
+ * ======== nameserver_remotenotify_ioctl_setup ========
+ * Purpose:
+ * This is the ioctl interface to the nameserver_remotenotify_setup function
+ */
+static int nameserver_remotenotify_ioctl_setup(
+ struct nameserver_remotenotify_cmd_args *cargs)
+{
+ struct nameserver_remotenotify_config config;
+ s32 status = 0;
+ ulong size;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct nameserver_remotenotify_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = nameserver_remotenotify_setup(&config);
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_remotenotify_ioctl_destroy ========
+ * Purpose:
+ * This is the ioctl interface to the nameserver_remotenotify_destroy function
+ */
+static int nameserver_remotenotify_ioctl_destroy(
+ struct nameserver_remotenotify_cmd_args *cargs)
+{
+ cargs->api_status = nameserver_remotenotify_destroy();
+ return 0;
+}
+
+/*
+ * ======== nameserver_remotenotify_ioctl ========
+ * Purpose:
+ * This is the ioctl interface for the nameserver_remotenotify module
+ */
+int nameserver_remotenotify_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct nameserver_remotenotify_cmd_args __user *uarg =
+ (struct nameserver_remotenotify_cmd_args __user *)args;
+ struct nameserver_remotenotify_cmd_args cargs;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct nameserver_remotenotify_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_NAMESERVERREMOTENOTIFY_GET:
+ status = nameserver_remotenotify_ioctl_get(&cargs);
+ break;
+
+ case CMD_NAMESERVERREMOTENOTIFY_SHAREDMEMREQ:
+ status = nameserver_remotenotify_ioctl_shared_memreq(&cargs);
+ break;
+
+ case CMD_NAMESERVERREMOTENOTIFY_PARAMS_INIT:
+ status = nameserver_remotenotify_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_NAMESERVERREMOTENOTIFY_CREATE:
+ status = nameserver_remotenotify_ioctl_create(&cargs);
+ break;
+
+ case CMD_NAMESERVERREMOTENOTIFY_DELETE:
+ status = nameserver_remotenotify_ioctl_delete(&cargs);
+ break;
+
+ case CMD_NAMESERVERREMOTENOTIFY_GETCONFIG:
+ status = nameserver_remotenotify_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_NAMESERVERREMOTENOTIFY_SETUP:
+ status = nameserver_remotenotify_ioctl_setup(&cargs);
+ break;
+
+ case CMD_NAMESERVERREMOTENOTIFY_DESTROY:
+ status = nameserver_remotenotify_ioctl_destroy(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ status = -ERESTARTSYS;
+
+ if (status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs,
+ sizeof(struct nameserver_remotenotify_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+}
+
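+/*
+ * Hypothetical user-space usage sketch for the ioctl dispatcher above; the
+ * device node path is an assumption, while the CMD_* macro and the cmd_args
+ * layout match the ones referenced in this file.
+ *
+ *	struct nameserver_remotenotify_cmd_args cargs;
+ *	struct nameserver_remotenotify_config cfg;
+ *	int fd = open("/dev/syslinkipc_nameserver_remotenotify", O_RDWR);
+ *
+ *	memset(&cargs, 0, sizeof(cargs));
+ *	cargs.args.get_config.config = &cfg;
+ *	if (fd >= 0 &&
+ *	    ioctl(fd, CMD_NAMESERVERREMOTENOTIFY_GETCONFIG, &cargs) == 0 &&
+ *	    cargs.api_status == 0)
+ *		; /* cfg now holds the module's default configuration */
+ */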
diff --git a/drivers/dsp/syslink/multicore_ipc/platform.c b/drivers/dsp/syslink/multicore_ipc/platform.c
new file mode 100644
index 000000000000..5a08400b0743
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/platform.c
@@ -0,0 +1,1420 @@
+/*
+ * platform.c
+ *
+ * Implementation of platform initialization logic for Syslink IPC.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+/* Standard header files */
+#include <linux/types.h>
+#include <linux/module.h>
+
+
+/* Utilities & Osal headers */
+/*#include <Gate.h>
+#include <GateMutex.h>
+#include <Memory.h>*/
+
+/* SysLink device specific headers */
+#include "../procmgr/proc4430/proc4430.h"
+
+/* Module level headers */
+#include <multiproc.h>
+#include <sysmgr.h>
+#include <_sysmgr.h>
+#include <sysmemmgr.h>
+#include <platform.h>
+#include <gatepeterson.h>
+#include <sharedregion.h>
+#include <listmp.h>
+#include <messageq.h>
+#include <messageq_transportshm.h>
+#include <notify.h>
+/*#include <NotifyDriver.h>*/
+#include <notify_ducatidriver.h>
+#include <nameserver.h>
+#include <nameserver_remote.h>
+#include <nameserver_remotenotify.h>
+#include <procmgr.h>
+#include <heap.h>
+#include <heapbuf.h>
+
+#include <platform_mem.h>
+
+
+/** ============================================================================
+ * Application-specific configuration; please change these values according to
+ * your application's needs.
+ * ============================================================================
+ */
+/* App defines */
+
+/* SYSM3 Heap */
+#define SYSM3HEAPID 0
+#define SYSM3HEAPNAME "SysMgrHeap0"
+
+/* APPM3 Heap */
+#define APPM3HEAPID 1
+#define APPM3HEAPNAME "SysMgrHeap1"
+
+
+/*!
+ * @brief Interrupt ID of physical interrupt handled by the Notify driver to
+ * receive events.
+ */
+#define BASE_DUCATI2ARM_INTID 26
+
+/*!
+ * @brief Interrupt ID of physical interrupt handled by the Notify driver to
+ * send events.
+ */
+#define BASE_ARM2DUCATI_INTID 50
+
+/*!
+ * @brief Maximum events supported by Notify component
+ */
+#define NOTIFY_MAX_EVENTS 32
+
+/*!
+ * @brief Number of events reserved, i.e. that cannot be used by the application
+ */
+#define NOTIFY_NUMRESERVEDEVENTS 0
+
+/*!
+ * @brief Poll count to wait for when sending an event
+ */
+#define NOTIFY_SENDEVENTPOLLCOUNT 0xfffff
+
+/*!
+ * @brief Alignment of buffers in the heap
+ */
+#define HEAPBUF_ALIGN 128
+
+/*!
+ * @brief Number of blocks in the heap
+ */
+#define HEAPBUF_NUMBLOCKS 16
+
+/*!
+ * @brief Size of each block in the heap
+ */
+#define HEAPBUF_BLOCKSIZE 256
+
+
+/*! @brief Start of IPC shared memory */
+#define SHAREDMEMORY_PHY_BASEADDR 0x9CF00000
+#define SHAREDMEMORY_PHY_BASESIZE 0x00100000
+
+/*! @brief Start of IPC shared memory for SysM3 */
+#define SHAREDMEMORY_PHY_BASEADDR_SYSM3 0x9CF00000
+#define SHAREDMEMORY_PHY_BASESIZE_SYSM3 0x00054000
+
+/*! @brief Start of IPC shared memory AppM3 */
+#define SHAREDMEMORY_PHY_BASEADDR_APPM3 0x9CF55000
+#define SHAREDMEMORY_PHY_BASESIZE_APPM3 0x00054000
+
+/*! @brief Start of IPC SHM for SysM3 */
+#define SHAREDMEMORY_SLV_VRT_BASEADDR_SYSM3 0xA0000000
+#define SHAREDMEMORY_SLV_VRT_BASESIZE_SYSM3 0x00055000
+
+/*! @brief Start of IPC SHM for AppM3 */
+#define SHAREDMEMORY_SLV_VRT_BASEADDR_APPM3 0xA0055000
+#define SHAREDMEMORY_SLV_VRT_BASESIZE_APPM3 0x00055000
+
+/*! @brief Start of Boot load page for SysM3 */
+#define BOOTLOADPAGE_SLV_VRT_BASEADDR_SYSM3 0xA0054000
+#define BOOTLOADPAGE_SLV_VRT_BASESIZE_SYSM3 0x00001000
+
+/*! @brief Start of Boot load page for AppM3 */
+#define BOOTLOADPAGE_SLV_VRT_BASEADDR_APPM3 0xA00A9000
+#define BOOTLOADPAGE_SLV_VRT_BASESIZE_APPM3 0x00001000
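+
+/*
+ * Resulting layout, derived from the values above: each M3 slave sees a
+ * 0x55000-byte IPC window, i.e. a 0x54000-byte heap followed by a
+ * 0x1000-byte boot load page. SysM3: physical 0x9CF00000, slave virtual
+ * 0xA0000000 (boot page at 0xA0054000). AppM3 follows immediately:
+ * physical 0x9CF55000, slave virtual 0xA0055000 (boot page at 0xA00A9000).
+ */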
+
+/*! @brief Start of SW DMM shared memory */
+#define SHAREDMEMORY_SWDMM_PHY_BASEADDR 0x9F300000
+#define SHAREDMEMORY_SWDMM_PHY_BASESIZE 0x00C00000
+
+/*! @brief Start of SW DMM SHM for Ducati */
+#define SHAREDMEMORY_SWDMM_SLV_VRT_BASEADDR 0x81300000
+#define SHAREDMEMORY_SWDMM_SLV_VRT_BASESIZE 0x00C00000
+
+/*!
+ * @brief Size of the shared memory heap. This heap provides shared memory
+ * to drivers/instances and should not be used for any other purpose.
+ */
+#define SMHEAP_SIZE SHAREDMEMORY_PHY_BASESIZE
+
+/*!
+ * @brief Shared region index for Shared memory heap.
+ */
+#define SMHEAP_SRINDEX 0
+
+/*!
+ * @brief Shared region index for Shared memory heap for SysM3.
+ */
+#define SMHEAP_SRINDEX_SYSM3 0
+
+/*!
+ * @brief Shared region index for Shared memory heap for AppM3.
+ */
+#define SMHEAP_SRINDEX_APPM3 1
+
+/*!
+ * @brief Shared region index for Shared memory SW DMM section.
+ */
+#define SMHEAP_SRINDEX_SWDMM 2
+
+/*!
+ * @brief Shared region index for SysM3 boot load page
+ */
+#define BOOTLOADPAGE_SRINDEX 1
+
+
+/*!
+ * @brief Event number used by sysmemmgr
+ */
+#define PLATFORM_SYSMEMMGR_EVENTNO 31
+
+
+/** ============================================================================
+ * Command IDs used by the boot load page logic to transfer info
+ * ============================================================================
+ */
+/*!
+ * @brief Command ID for notify driver.
+ */
+#define PLATFORM_CMD_NOTIFYDRIVER SYSMGR_CMD_SHAREDREGION_ENTRY_END
+
+/*!
+ * @brief Command ID for GatePeterson used by nameserverremotenotify.
+ */
+#define PLATFORM_CMD_GPNSRN (PLATFORM_CMD_NOTIFYDRIVER + 1)
+
+/*!
+ * @brief Command ID for nameserverremotenotify.
+ */
+#define PLATFORM_CMD_NSRN (PLATFORM_CMD_NOTIFYDRIVER + 2)
+
+/*!
+ * @brief Command ID for GatePeterson used by HeapBuf.
+ */
+#define PLATFORM_CMD_GPHEAPBUF (PLATFORM_CMD_NOTIFYDRIVER + 3)
+
+/*!
+ * @brief Command ID for HeapBuf.
+ */
+#define PLATFORM_CMD_HEAPBUF (PLATFORM_CMD_NOTIFYDRIVER + 4)
+
+/*!
+ * @brief Command ID for GatePeterson used by MessageQTransportShm.
+ */
+#define PLATFORM_CMD_GPMQT (PLATFORM_CMD_NOTIFYDRIVER + 5)
+
+/*!
+ * @brief Command ID for MessageQTransportShm.
+ */
+#define PLATFORM_CMD_MQT (PLATFORM_CMD_NOTIFYDRIVER + 6)
+
+
+/** ============================================================================
+ * Handles used by platform logic
+ * ============================================================================
+ */
+void *platform_notifydrv_handle;
+
+/* Handles for SysM3 */
+void *platform_nsrn_gate_handle_sysm3;
+void *platform_nsrn_handle_sysm3;
+void *platform_notifydrv_handle_sysm3;
+void *platform_heap_gate_handle_sysm3;
+void *platform_heap_handle_sysm3;
+void *platform_mqt_gate_handle_sysm3;
+void *platform_transport_shm_handle_sysm3;
+void *platform_messageq_sysm3;
+
+/* Handles for AppM3 */
+void *platform_nsrn_gate_handle_appm3;
+void *platform_nsrn_handle_appm3;
+void *platform_notifydrv_handle_appm3;
+void *platform_heap_gate_handle_appm3;
+void *platform_heap_handle_appm3;
+void *platform_mqt_gate_handle_appm3;
+void *platform_transport_shm_handle_appm3;
+void *platform_messageq_appm3;
+
+
+/** ============================================================================
+ * Struct & Enums.
+ * ============================================================================
+ */
+/* Struct for reading platform-specific GatePeterson configuration values */
+struct platform_gaterpeterson_params {
+ u32 shared_mem_addr; /* Shared memory address */
+ u32 shared_mem_size; /* Shared memory size */
+ u32 remote_proc_id; /* Remote processor identifier */
+};
+
+struct platform_notify_ducatidrv_params {
+ u32 shared_mem_addr; /* Shared memory address */
+ u32 shared_mem_size; /* Shared memory size */
+ u16 remote_proc_id; /* Remote processor identifier */
+};
+
+struct platform_nameserver_remotenotify_params {
+ u32 shared_mem_addr; /* Shared memory address */
+ u32 shared_mem_size; /* Shared memory size */
+ u32 notify_event_no; /* Notify event number to be used */
+};
+
+struct platform_heapbuf_params {
+ u32 shared_mem_addr; /* Shared memory address */
+ u32 shared_mem_size; /* Shared memory size */
+ u32 shared_buf_addr; /* Shared buffer address */
+ u32 shared_buf_size; /* Shared buffer size */
+ u32 num_blocks;
+ u32 block_size;
+};
+
+struct platform_messageq_transportshm_params {
+ u32 shared_mem_addr; /* Shared memory address */
+ u32 shared_mem_size; /* Shared memory size */
+ u32 notify_event_no; /* Notify Event number */
+};
+
+struct platform_proc_config_params {
+ u32 use_notify;
+ u32 use_messageq;
+ u32 use_heapbuf;
+ u32 use_frameq;
+ u32 use_ring_io;
+ u32 use_listmp;
+ u32 use_nameserver;
+};
+
+/** ============================================================================
+ * Macros and types
+ * ============================================================================
+ */
+/*!
+ * @brief Number of slave memory entries for OMAP4430.
+ */
+#define NUM_MEM_ENTRIES 3
+
+/*!
+ * @brief Number of slave memory entries for OMAP4430 SYSM3.
+ */
+#define NUM_MEM_ENTRIES_SYSM3 1
+
+/*!
+ * @brief Number of slave memory entries for OMAP4430 APPM3.
+ */
+#define NUM_MEM_ENTRIES_APPM3 1
+
+/*!
+ * @brief Position of reset vector memory region in the memEntries array.
+ */
+#define RESET_VECTOR_ENTRY_ID 0
+
+
+/** ============================================================================
+ * Globals
+ * ============================================================================
+ */
+/*!
+ * @brief Array of memory entries for OMAP4430
+ */
+static struct proc4430_mem_entry mem_entries[NUM_MEM_ENTRIES] = {
+ {
+ "DUCATI_SHM_SYSM3", /* NAME : Name of the memory region */
+ SHAREDMEMORY_PHY_BASEADDR_SYSM3,
+ /* PHYSADDR : Physical address */
+ SHAREDMEMORY_SLV_VRT_BASEADDR_SYSM3,
+ /* SLAVEVIRTADDR : Slave virtual address */
+ (u32) -1u,
+ /* MASTERVIRTADDR : Master virtual address (if known) */
+ SHAREDMEMORY_SLV_VRT_BASESIZE_SYSM3,
+ /* SIZE : Size of the memory region */
+ true, /* SHARE : Shared access memory? */
+ },
+ {
+ "DUCATI_SHM_APPM3", /* NAME : Name of the memory region */
+ SHAREDMEMORY_PHY_BASEADDR_APPM3,
+ /* PHYSADDR : Physical address */
+ SHAREDMEMORY_SLV_VRT_BASEADDR_APPM3,
+ /* SLAVEVIRTADDR : Slave virtual address */
+ (u32) -1u,
+ /* MASTERVIRTADDR : Master virtual address (if known) */
+ SHAREDMEMORY_SLV_VRT_BASESIZE_APPM3,
+ /* SIZE : Size of the memory region */
+ true, /* SHARE : Shared access memory? */
+ },
+ {
+ "DUCATI_SHM_SWDMM", /* NAME : Name of the memory region */
+ SHAREDMEMORY_SWDMM_PHY_BASEADDR,
+ /* PHYSADDR : Physical address */
+ SHAREDMEMORY_SWDMM_SLV_VRT_BASEADDR,
+ /* SLAVEVIRTADDR : Slave virtual address */
+ (u32) -1u,
+ /* MASTERVIRTADDR : Master virtual address (if known) */
+ SHAREDMEMORY_SWDMM_SLV_VRT_BASESIZE,
+ /* SIZE : Size of the memory region */
+ true, /* SHARE : Shared access memory? */
+ }
+};
+
+void *procmgr_handle;
+void *procmgr_proc_handle;
+void *platform_sm_heap_virt_addr_sysm3;
+void *platform_sm_heap_virt_addr_appm3;
+
+/*!
+ * @brief Handle to the ProcMgr instance used.
+ */
+void *procmgr_handle;
+
+/*!
+ * @brief Handle to the Processor instance used.
+ */
+void *procmgr_proc_handle;
+
+/*!
+ * @brief Handle to the SysM3 ProcMgr instance used.
+ */
+void *procmgr_handle_sysm3;
+
+/*!
+ * @brief Handle to the AppM3 ProcMgr instance used.
+ */
+void *procmgr_handle_appm3;
+
+
+/*!
+ * @brief Handle to the SysM3 Processor instance used.
+ */
+void *procmgr_proc_handle_sysm3;
+
+/*!
+ * @brief Handle to the AppM3 Processor instance used.
+ */
+void *procmgr_proc_handle_appm3;
+
+/*!
+ * @brief File ID of the file loaded.
+ */
+u32 procmgr_file_id;
+
+/*!
+ * @brief Shared memory heap virtual address.
+ */
+void *platform_sm_heap_virt_addr;
+
+/*!
+ * @brief Shared memory heap physical address.
+ */
+void *platform_sm_heap_phys_addr;
+
+/*!
+ * @brief Scalability info
+ */
+struct sysmgr_proc_config pc_params;
+
+/*!
+ * @brief SW DMM virtual address.
+ */
+void *platform_sw_dmm_virt_addr;
+
+/* =============================================================================
+ * APIS
+ * =============================================================================
+ */
+
+/*
+ * ======== platform_setup ========
+ * Purpose:
+ * TBD: logic would change completely in the final system.
+ */
+s32 platform_setup(struct sysmgr_config *config)
+{
+
+ s32 status = 0;
+ struct proc4430_config proc_config;
+ struct proc_mgr_params params;
+ struct proc4430_params proc_params;
+ struct proc_mgr_attach_params attach_params;
+ u16 proc_id;
+ struct sysmemmgr_config sysmemmgr_cfg;
+ struct platform_mem_map_info info;
+
+ if (WARN_ON(config == NULL)) {
+ /*! @retval SYSMGR_E_INVALIDARG Argument of type
+ * (struct sysmgr_config *) passed is null */
+ status = -EINVAL;
+ goto invalid_config_fail;
+ }
+
+ /* Map the static region */
+ info.src = SHAREDMEMORY_PHY_BASEADDR;
+ info.size = SHAREDMEMORY_PHY_BASESIZE;
+ info.is_cached = false;
+ status = platform_mem_map(&info);
+ if (status < 0)
+ goto mem_map_fail;
+
+ /* Get default config for System memory manager */
+ sysmemmgr_get_config(&sysmemmgr_cfg);
+ /* Initialize the System memory manager */
+ sysmemmgr_cfg.static_mem_size = SHAREDMEMORY_PHY_BASESIZE;
+ sysmemmgr_cfg.static_phys_base_addr = SHAREDMEMORY_PHY_BASEADDR;
+ sysmemmgr_cfg.static_virt_base_addr = info.dst;
+ sysmemmgr_cfg.event_no = PLATFORM_SYSMEMMGR_EVENTNO;
+ status = sysmemmgr_setup(&sysmemmgr_cfg);
+ if (status < 0)
+ goto sysmemmgr_setup_fail;
+
+ /* The heap for SysM3 and AppM3 are allocated at once */
+ platform_sm_heap_virt_addr = sysmemmgr_alloc(SMHEAP_SIZE,
+ sysmemmgr_allocflag_physical);
+ if (platform_sm_heap_virt_addr == NULL)
+ goto sysmemmgr_alloc_fail;
+
+ platform_sm_heap_virt_addr_sysm3 = platform_sm_heap_virt_addr;
+ /* The AppM3 shared area is after SysM3 heap + boot load page */
+ platform_sm_heap_virt_addr_appm3 = (platform_sm_heap_virt_addr_sysm3 +
+ SHAREDMEMORY_PHY_BASESIZE_SYSM3 +
+ BOOTLOADPAGE_SLV_VRT_BASESIZE_SYSM3);
+
+
+ /* Create the shared region entry for the SysM3 heap */
+ sharedregion_add(SMHEAP_SRINDEX_SYSM3,
+ platform_sm_heap_virt_addr_sysm3,
+ SHAREDMEMORY_PHY_BASESIZE_SYSM3);
+ /* Zero out the shared memory for SysM3 */
+ memset((void *) platform_sm_heap_virt_addr_sysm3,
+ 0,
+ SHAREDMEMORY_PHY_BASESIZE_SYSM3);
+
+ /* Create the shared region entry for the AppM3 heap */
+ sharedregion_add(SMHEAP_SRINDEX_APPM3,
+ platform_sm_heap_virt_addr_appm3,
+ SHAREDMEMORY_PHY_BASESIZE_APPM3);
+ /* Zero out the shared memory for AppM3 */
+ memset((void *) platform_sm_heap_virt_addr_appm3,
+ 0,
+ SHAREDMEMORY_PHY_BASESIZE_APPM3);
+
+ /* Map the static region */
+ info.src = SHAREDMEMORY_SWDMM_PHY_BASEADDR;
+ info.size = SHAREDMEMORY_SWDMM_PHY_BASESIZE;
+ info.is_cached = false;
+ status = platform_mem_map(&info);
+ if (status < 0)
+ goto mem_map_fail;
+ platform_sw_dmm_virt_addr = (void *) info.dst;
+ /* Create the shared region entry for the SW DMM heap */
+ sharedregion_add(SMHEAP_SRINDEX_SWDMM,
+ platform_sw_dmm_virt_addr,
+ info.size);
+
+ proc4430_get_config(&proc_config);
+ status = proc4430_setup(&proc_config);
+ if (status < 0)
+ goto proc_setup_fail;
+
+
+ /* Get MultiProc ID by name. */
+ proc_id = multiproc_get_id("SysM3");
+
+ /* Create an instance of the Processor object for OMAP4430 */
+ proc4430_params_init(NULL, &proc_params);
+ proc_params.num_mem_entries = NUM_MEM_ENTRIES;
+ proc_params.mem_entries = mem_entries;
+ proc_params.reset_vector_mem_entry = RESET_VECTOR_ENTRY_ID;
+ procmgr_proc_handle = proc4430_create(proc_id, &proc_params);
+ if (procmgr_proc_handle == NULL) {
+ status = SYSMGR_E_FAIL;
+ goto proc_create_fail;
+ }
+
+ /* Initialize parameters */
+ proc_mgr_params_init(NULL, &params);
+ params.proc_handle = procmgr_proc_handle;
+ procmgr_handle = proc_mgr_create(proc_id, &params);
+ if (procmgr_handle == NULL) {
+ status = SYSMGR_E_FAIL;
+ goto proc_mgr_create_fail;
+ }
+
+ proc_mgr_get_attach_params(NULL, &attach_params);
+ /* Default params will be used if NULL is passed. */
+ status = proc_mgr_attach(procmgr_handle, &attach_params);
+ if (status < 0) {
+ status = SYSMGR_E_FAIL;
+ goto proc_mgr_attach_fail;
+ }
+
+
+ /* Save the SysM3 handles; fresh instances are created for AppM3 below */
+ procmgr_handle_sysm3 = procmgr_handle;
+ procmgr_proc_handle_sysm3 = procmgr_proc_handle;
+
+ procmgr_handle = NULL;
+ procmgr_proc_handle = NULL;
+
+
+
+ /* Get MultiProc ID by name. */
+ proc_id = multiproc_get_id("AppM3");
+
+ /* Create an instance of the Processor object for OMAP4430 */
+ proc4430_params_init(NULL, &proc_params);
+ proc_params.num_mem_entries = NUM_MEM_ENTRIES;
+ proc_params.mem_entries = mem_entries;
+ proc_params.reset_vector_mem_entry = RESET_VECTOR_ENTRY_ID;
+ procmgr_proc_handle = proc4430_create(proc_id, &proc_params);
+ if (procmgr_proc_handle == NULL) {
+ status = SYSMGR_E_FAIL;
+ goto proc_create_fail;
+ }
+
+ /* Initialize parameters */
+ proc_mgr_params_init(NULL, &params);
+ params.proc_handle = procmgr_proc_handle;
+ procmgr_handle = proc_mgr_create(proc_id, &params);
+ if (procmgr_handle == NULL) {
+ status = SYSMGR_E_FAIL;
+ goto proc_mgr_create_fail;
+ }
+
+ proc_mgr_get_attach_params(NULL, &attach_params);
+ /* Default params will be used if NULL is passed. */
+ status = proc_mgr_attach(procmgr_handle, &attach_params);
+ if (status < 0) {
+ status = SYSMGR_E_FAIL;
+ goto proc_mgr_attach_fail;
+ }
+
+ procmgr_handle_appm3 = procmgr_handle;
+ procmgr_proc_handle_appm3 = procmgr_proc_handle;
+ goto exit;
+
+proc_mgr_attach_fail:
+ printk(KERN_ERR "platform_setup: proc_mgr_attach failed [0x%x]"
+ " for processor [0x%x]\n", status, proc_id);
+ goto exit;
+
+proc_mgr_create_fail:
+ printk(KERN_ERR "platform_setup: proc_mgr_create failed [0x%x]",
+ status);
+ goto exit;
+proc_create_fail:
+ printk(KERN_ERR "platform_setup: proc4430_create failed [0x%x]",
+ status);
+ goto exit;
+proc_setup_fail:
+ printk(KERN_ERR "platform_setup: proc4430_setup failed [0x%x]",
+ status);
+ goto exit;
+sysmemmgr_alloc_fail:
+ printk(KERN_ERR "platform_setup: sysmemmgr_alloc failed [0x%x]",
+ status);
+ goto exit;
+sysmemmgr_setup_fail:
+ printk(KERN_ERR "platform_setup: sysmemmgr_setup failed [0x%x]",
+ status);
+ goto exit;
+mem_map_fail:
+ printk(KERN_ERR "platform_setup: platform_mem_map failed [0x%x]",
+ status);
+ goto exit;
+invalid_config_fail:
+ printk(KERN_ERR "platform_setup: Argument of type (sysmgr_get_config *)"
+ " passed is null [0x%x]", status);
+exit:
+ return status;
+}
+
+
+/*
+ * ======== platform_destroy ========
+ * Purpose:
+ * Function to finalize the platform.
+ */
+s32 platform_destroy(void)
+{
+ s32 status = 0;
+ struct platform_mem_unmap_info u_info;
+ /* Delete the Processor instances */
+
+ if (procmgr_handle_appm3 != NULL) {
+ status = proc_mgr_detach(procmgr_handle_appm3);
+ WARN_ON(status < 0);
+ }
+
+ if (procmgr_proc_handle_appm3 != NULL) {
+ status = proc4430_delete(&procmgr_proc_handle_appm3);
+ WARN_ON(status < 0);
+ }
+
+ if (procmgr_handle_appm3 != NULL) {
+ status = proc_mgr_delete(&procmgr_handle_appm3);
+ WARN_ON(status < 0);
+ }
+
+ if (procmgr_handle_sysm3 != NULL) {
+ status = proc_mgr_detach(procmgr_handle_sysm3);
+ WARN_ON(status < 0);
+ }
+
+ if (procmgr_proc_handle_sysm3 != NULL) {
+ status = proc4430_delete(&procmgr_proc_handle_sysm3);
+ WARN_ON(status < 0);
+ }
+
+ if (procmgr_handle_sysm3 != NULL) {
+ status = proc_mgr_delete(&procmgr_handle_sysm3);
+ WARN_ON(status < 0);
+ }
+
+ status = proc4430_destroy();
+ WARN_ON(status < 0);
+
+
+ sharedregion_remove(SMHEAP_SRINDEX_APPM3);
+ sharedregion_remove(SMHEAP_SRINDEX_SYSM3);
+
+ sysmemmgr_free(platform_sm_heap_virt_addr, SMHEAP_SIZE,
+ sysmemmgr_allocflag_physical);
+
+ status = sysmemmgr_destroy();
+ WARN_ON(status < 0);
+
+ if (platform_sm_heap_virt_addr != NULL) {
+ u_info.addr = (u32) platform_sm_heap_virt_addr;
+ platform_mem_unmap(&u_info);
+ }
+
+ if (platform_sw_dmm_virt_addr != NULL) {
+ u_info.addr = (u32) platform_sw_dmm_virt_addr;
+ platform_mem_unmap(&u_info);
+ }
+
+ return status;
+}
+
+
+/*
+ * ======== platform_load_callback ========
+ * Purpose:
+ * Function called by proc_mgr when slave is in loaded state.
+ */
+void platform_load_callback(void *arg)
+{
+ s32 status = 0;
+ u16 proc_id = (u32) arg;
+ u16 local_id = MULTIPROC_INVALIDID;
+ struct sharedregion_info info;
+ u32 boot_load_page;
+ u32 sh_addr_base;
+ u32 nwrite;
+ int index;
+
+ printk(KERN_ERR "platform_load_callback\n");
+
+ /* Get the written entry */
+ local_id = multiproc_get_id(NULL);
+
+ if (proc_id == multiproc_get_id("SysM3"))
+ index = SMHEAP_SRINDEX_SYSM3;
+ else if (proc_id == multiproc_get_id("AppM3"))
+ index = SMHEAP_SRINDEX_APPM3;
+ else {
+ status = SYSMGR_E_FAIL;
+ goto proc_invalid_id;
+ }
+ /* Add the entry to the shared region */
+ switch (index) {
+ case SMHEAP_SRINDEX_SYSM3: /* For SysM3 */
+ /* Get the boot load page address */
+ boot_load_page = BOOTLOADPAGE_SLV_VRT_BASEADDR_SYSM3;
+ status = proc_mgr_translate_addr(procmgr_handle_sysm3,
+ (void *) &sh_addr_base,
+ PROC_MGR_ADDRTYPE_MASTERKNLVIRT,
+ (void *) boot_load_page,
+ PROC_MGR_ADDRTYPE_SLAVEVIRT);
+ if (status < 0)
+ break;
+ /* Zero out the boot load page */
+ memset((void *) sh_addr_base,
+ 0,
+ BOOTLOADPAGE_SLV_VRT_BASESIZE_SYSM3);
+ break;
+
+ case SMHEAP_SRINDEX_APPM3: /* For AppM3 */
+ /* Get the boot load page address */
+ boot_load_page = BOOTLOADPAGE_SLV_VRT_BASEADDR_APPM3;
+ status = proc_mgr_translate_addr(procmgr_handle_appm3,
+ (void *) &sh_addr_base,
+ PROC_MGR_ADDRTYPE_MASTERKNLVIRT,
+ (void *) boot_load_page,
+ PROC_MGR_ADDRTYPE_SLAVEVIRT);
+ if (status < 0)
+ break;
+
+ /* Zero out the boot load page */
+ memset((void *) sh_addr_base,
+ 0,
+ BOOTLOADPAGE_SLV_VRT_BASESIZE_APPM3);
+ break;
+ }
+
+ if (status < 0)
+ goto proc_mgr_translate_addr_fail;
+
+ /* Set the boot load page address */
+ sysmgr_set_boot_load_page(proc_id, sh_addr_base);
+
+ /*
+ * Write the boot table (containing both regions) to the
+ * current processor.
+ */
+
+ /* For SysM3 */
+ sharedregion_get_table_info(SMHEAP_SRINDEX_SYSM3,
+ local_id,
+ &info);
+ platform_sm_heap_virt_addr_sysm3 = sysmemmgr_translate(
+ platform_sm_heap_virt_addr_sysm3,
+ sysmemmgr_xltflag_kvirt2phys);
+ info.base = (void *) SHAREDMEMORY_SLV_VRT_BASEADDR_SYSM3;
+ nwrite = sysmgr_put_object_config(proc_id,
+ (void *) &info,
+ SYSMGR_CMD_SHAREDREGION_ENTRY_START +
+ SMHEAP_SRINDEX_SYSM3,
+ sizeof(struct sharedregion_info));
+ WARN_ON(nwrite != sizeof(struct sharedregion_info));
+
+ /* For AppM3 */
+ sharedregion_get_table_info(SMHEAP_SRINDEX_APPM3,
+ local_id,
+ &info);
+ platform_sm_heap_virt_addr_appm3 = sysmemmgr_translate(
+ platform_sm_heap_virt_addr_appm3,
+ sysmemmgr_xltflag_kvirt2phys);
+ info.base = (void *) SHAREDMEMORY_SLV_VRT_BASEADDR_APPM3;
+
+ /* Write info into the boot load page */
+ nwrite = sysmgr_put_object_config(proc_id,
+ (void *) &info,
+ SYSMGR_CMD_SHAREDREGION_ENTRY_START +
+ SMHEAP_SRINDEX_APPM3,
+ sizeof(struct sharedregion_info));
+ WARN_ON(nwrite != sizeof(struct sharedregion_info));
+
+ /* For SW DMM region */
+ sharedregion_get_table_info(SMHEAP_SRINDEX_SWDMM,
+ local_id,
+ &info);
+ info.base = (void *) SHAREDMEMORY_SWDMM_SLV_VRT_BASEADDR;
+
+ /* Write info into the boot load page */
+ nwrite = sysmgr_put_object_config(proc_id,
+ (void *) &info,
+ SYSMGR_CMD_SHAREDREGION_ENTRY_START +
+ SMHEAP_SRINDEX_SWDMM,
+ sizeof(struct sharedregion_info));
+ WARN_ON(nwrite != sizeof(struct sharedregion_info));
+ goto exit;
+
+proc_mgr_translate_addr_fail:
+ printk(KERN_ERR "platform_load_callback: proc_mgr_translate_addr failed"
+ " [0x%x] for proc_id [0x%x]\n",
+ status, proc_id);
+ goto exit;
+proc_invalid_id:
+ printk(KERN_ERR "platform_load_callback failed invalid proc_id [0x%x]\n",
+ proc_id);
+exit:
+ return;
+}
+EXPORT_SYMBOL(platform_load_callback);
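+
+/*
+ * Load-time handshake performed above, as a summary sketch: the slave's boot
+ * load page address is translated to a master kernel virtual address via
+ * proc_mgr_translate_addr(), the page is zeroed and registered with
+ * sysmgr_set_boot_load_page(), and the SysM3, AppM3 and SW DMM
+ * sharedregion_info entries (bases rewritten as slave virtual addresses) are
+ * published through sysmgr_put_object_config() at
+ * SYSMGR_CMD_SHAREDREGION_ENTRY_START + index.
+ */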
+
+
+/*
+ * ======== platform_start_callback ========
+ * Purpose:
+ * Function called by proc_mgr when slave is in started state.
+ * FIXME: logic would change completely in the final system.
+ */
+void platform_start_callback(void *arg)
+{
+ s32 status = 0;
+ u16 local_id = MULTIPROC_INVALIDID;
+ u16 proc_id = (u32) arg;
+ u32 nread = 0;
+ u32 i = 0;
+ u32 cmd_id;
+ u32 sh_addr;
+ int index;
+
+ struct notify_ducatidrv_params notify_shm_params;
+ struct gatepeterson_params gate_params;
+ struct nameserver_remotenotify_params nsr_params;
+ struct heapbuf_params heap_params;
+ struct messageq_transportshm_params msgqt_params;
+ struct sharedregion_config sr_config;
+ struct sharedregion_info info;
+
+ struct platform_notify_ducatidrv_params pnds_params;
+ struct platform_heapbuf_params phb_params;
+ struct platform_gaterpeterson_params pgp_params;
+ struct platform_nameserver_remotenotify_params pnsrn_params;
+ struct platform_messageq_transportshm_params pmqt_params;
+ /*u32 proc_ids[2];*/
+
+ printk(KERN_ERR "platform_start_callback\n");
+ if (proc_id == multiproc_get_id("SysM3"))
+ index = SMHEAP_SRINDEX_SYSM3;
+ else if (proc_id == multiproc_get_id("AppM3"))
+ index = SMHEAP_SRINDEX_APPM3;
+ else {
+ status = SYSMGR_E_FAIL;
+ goto proc_invalid_id;
+ }
+ /* Wait for slave to write the scalability info */
+ sysmgr_wait_for_scalability_info(proc_id);
+ /* Read the scalability info */
+ do {
+ nread = sysmgr_get_object_config(proc_id, (void *) &pc_params,
+ SYSMGR_CMD_SCALABILITY,
+ sizeof(struct sysmgr_proc_config));
+ } while (nread != sizeof(struct sysmgr_proc_config));
+
+ if (status >= 0) {
+ local_id = multiproc_get_id(NULL);
+ status = multiproc_set_local_id(local_id);
+ if (status < 0) {
+ status = SYSMGR_E_FAIL;
+ goto multiproc_fail;
+ }
+ }
+
+ /* TODO: add condition: proc_id == multiproc_get_id("SysM3") */
+ if (pc_params.use_notify) {
+ do {
+ nread = sysmgr_get_object_config(proc_id,
+ (void *) &pnds_params,
+ PLATFORM_CMD_NOTIFYDRIVER,
+ sizeof(struct \
+ platform_notify_ducatidrv_params));
+ } while (nread != \
+ sizeof(struct platform_notify_ducatidrv_params));
+
+ sh_addr = (u32)sharedregion_get_ptr((u32 *)
+ pnds_params.shared_mem_addr);
+ if (sh_addr == (u32)NULL) {
+ status = SYSMGR_E_FAIL;
+ goto sharedregion_getptr_fail;
+ }
+ notify_ducatidrv_params_init(NULL, &notify_shm_params);
+ notify_shm_params.shared_addr = sh_addr;
+ notify_shm_params.shared_addr_size = \
+ pnds_params.shared_mem_size;
+ notify_shm_params.num_events = NOTIFY_MAX_EVENTS;
+ notify_shm_params.num_reserved_events = \
+ NOTIFY_NUMRESERVEDEVENTS;
+ notify_shm_params.send_event_poll_count = \
+ NOTIFY_SENDEVENTPOLLCOUNT;
+ notify_shm_params.recv_int_id = BASE_DUCATI2ARM_INTID;
+ notify_shm_params.send_int_id = BASE_ARM2DUCATI_INTID;
+ notify_shm_params.remote_proc_id = proc_id;
+ if (platform_notifydrv_handle == NULL) {
+ /* Create instance of Notify Ducati Driver */
+ platform_notifydrv_handle = notify_ducatidrv_create(
+ "NOTIFYDRIVER_DUCATI",
+ &notify_shm_params);
+ if (platform_notifydrv_handle == NULL) {
+ status = SYSMGR_E_FAIL;
+ goto notify_ducatidrv_create_fail;
+ }
+ }
+
+ /* The notify is created only once and used for Sys and App */
+ if (index == SMHEAP_SRINDEX_APPM3)
+ platform_notifydrv_handle_appm3 =
+ platform_notifydrv_handle;
+ else
+ platform_notifydrv_handle_sysm3 =
+ platform_notifydrv_handle;
+ }
+ if (pc_params.use_nameserver) {
+ do {
+ nread = sysmgr_get_object_config(proc_id,
+ (void *) &pgp_params,
+ PLATFORM_CMD_GPNSRN,
+ sizeof(struct \
+ platform_gaterpeterson_params));
+ } while (nread != sizeof(struct platform_gaterpeterson_params));
+ sh_addr = (u32)sharedregion_get_ptr((u32 *)
+ pgp_params.shared_mem_addr);
+ if (sh_addr == (u32)NULL) {
+ status = SYSMGR_E_FAIL;
+ goto sharedregion_getptr_fail;
+ }
+ gatepeterson_params_init(NULL, &gate_params);
+ gate_params.shared_addr = (void *) sh_addr;
+ gate_params.shared_addr_size = pgp_params.shared_mem_size;
+ do {
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = gatepeterson_open(
+ &platform_nsrn_gate_handle_appm3,
+ &gate_params);
+ else
+ status = gatepeterson_open(
+ &platform_nsrn_gate_handle_sysm3,
+ &gate_params);
+ } while (status == -ENXIO);
+
+ if (status < 0) {
+ status = SYSMGR_E_FAIL;
+ goto gatepeterson_open_fail;
+ }
+
+ do {
+ nread = sysmgr_get_object_config(proc_id,
+ (void *) &pnsrn_params,
+ PLATFORM_CMD_NSRN,
+ sizeof(struct \
+ platform_nameserver_remotenotify_params));
+ } while (nread != \
+ sizeof(struct
+ platform_nameserver_remotenotify_params));
+ sh_addr = (u32) sharedregion_get_ptr((u32 *)
+ pnsrn_params.shared_mem_addr);
+ if (sh_addr == (u32)NULL) {
+ status = SYSMGR_E_FAIL;
+ goto sharedregion_getptr_fail;
+ }
+ /*
+ * Create the NameServerRemote implementation that is used to
+ * communicate with the remote processor. It uses some shared
+ * memory and the Notify module.
+ *
+ * Note that this implementation uses Notify to communicate, so
+ * interrupts need to be enabled.
+ */
+ nameserver_remotenotify_params_init(NULL, &nsr_params);
+ nsr_params.notify_driver = platform_notifydrv_handle;
+ /* Both are using same notify */
+ nsr_params.notify_event_no = pnsrn_params.notify_event_no;
+ nsr_params.shared_addr = (void *) sh_addr;
+ nsr_params.shared_addr_size = pnsrn_params.shared_mem_size;
+ if (index == SMHEAP_SRINDEX_APPM3) {
+ nsr_params.gate =
+ (void *) platform_nsrn_gate_handle_appm3;
+ platform_nsrn_handle_appm3 =
+ nameserver_remotenotify_create(
+ proc_id,
+ &nsr_params);
+ if (platform_nsrn_handle_appm3 == NULL) {
+ status = SYSMGR_E_FAIL;
+ goto nameserver_remotenotify_create_fail;
+ }
+ } else {
+ nsr_params.gate =
+ (void *) platform_nsrn_gate_handle_sysm3;
+ platform_nsrn_handle_sysm3 =
+ nameserver_remotenotify_create(
+ proc_id,
+ &nsr_params);
+ if (platform_nsrn_handle_sysm3 == NULL) {
+ status = SYSMGR_E_FAIL;
+ goto nameserver_remotenotify_create_fail;
+ }
+ }
+ }
+ if (pc_params.use_heapbuf) {
+ do {
+ nread = sysmgr_get_object_config(proc_id,
+ (void *) &pgp_params,
+ PLATFORM_CMD_GPHEAPBUF,
+ sizeof(struct \
+ platform_gaterpeterson_params));
+ } while (nread != sizeof(struct
+ platform_gaterpeterson_params));
+ sh_addr = (u32) sharedregion_get_ptr((u32 *)
+ pgp_params.shared_mem_addr);
+ if (sh_addr == (u32)NULL) {
+ status = SYSMGR_E_FAIL;
+ goto sharedregion_getptr_fail;
+ }
+ gatepeterson_params_init(NULL, &gate_params);
+ gate_params.shared_addr = (void *) sh_addr;
+ gate_params.shared_addr_size = pgp_params.shared_mem_size;
+ do {
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = gatepeterson_open(
+ &platform_heap_gate_handle_appm3,
+ &gate_params);
+ else
+ status = gatepeterson_open(
+ &platform_heap_gate_handle_sysm3,
+ &gate_params);
+ } while (status == -ENXIO);
+ if (status < 0) {
+ status = SYSMGR_E_FAIL;
+ goto gatepeterson_open_fail;
+ }
+
+ do {
+ nread = sysmgr_get_object_config(proc_id,
+ (void *) &phb_params,
+ PLATFORM_CMD_HEAPBUF,
+ sizeof(struct platform_heapbuf_params));
+ } while (nread != sizeof(struct platform_heapbuf_params));
+ /* Create the heap. */
+ sh_addr = (u32) sharedregion_get_ptr((u32 *)
+ phb_params.shared_mem_addr);
+ if (sh_addr == (u32)NULL) {
+ status = SYSMGR_E_FAIL;
+ goto sharedregion_getptr_fail;
+ }
+ heapbuf_params_init(NULL, &heap_params);
+ heap_params.shared_addr = (void *) sh_addr;
+ heap_params.align = HEAPBUF_ALIGN;
+ heap_params.num_blocks = phb_params.num_blocks;
+ heap_params.block_size = phb_params.block_size;
+ sh_addr = (u32) sharedregion_get_ptr((u32 *)
+ phb_params.shared_buf_addr);
+ if (sh_addr == (u32)NULL) {
+ status = SYSMGR_E_FAIL;
+ goto sharedregion_getptr_fail;
+ }
+ heap_params.shared_buf_size = phb_params.shared_buf_size;
+ heap_params.shared_buf = (void *) sh_addr;
+ if (index == SMHEAP_SRINDEX_APPM3) {
+ heap_params.name = APPM3HEAPNAME;
+ heap_params.gate = platform_heap_gate_handle_appm3;
+ } else {
+ heap_params.name = SYSM3HEAPNAME;
+ heap_params.gate = platform_heap_gate_handle_sysm3;
+ }
+ heap_params.shared_addr_size = phb_params.shared_mem_size;
+ do {
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = heapbuf_open(
+ &platform_heap_handle_appm3,
+ &heap_params);
+ else
+ status = heapbuf_open(
+ &platform_heap_handle_sysm3,
+ &heap_params);
+ } while (status == -ENXIO);
+ if (status < 0) {
+ status = SYSMGR_E_FAIL;
+ goto heapbuf_open_fail;
+ }
+ }
+ if (pc_params.use_messageq) {
+ do {
+ nread = sysmgr_get_object_config(proc_id, &pgp_params,
+ PLATFORM_CMD_GPMQT,
+ sizeof(struct \
+ platform_gaterpeterson_params));
+ } while (nread != sizeof(struct platform_gaterpeterson_params));
+ sh_addr = (u32) sharedregion_get_ptr((u32 *)
+ pgp_params.shared_mem_addr);
+ if (sh_addr == (u32)NULL) {
+ status = SYSMGR_E_FAIL;
+ goto sharedregion_getptr_fail;
+ }
+ gatepeterson_params_init(NULL, &gate_params);
+ gate_params.shared_addr = (void *) sh_addr;
+ gate_params.shared_addr_size = pgp_params.shared_mem_size;
+ do {
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = gatepeterson_open(
+ &platform_mqt_gate_handle_appm3,
+ &gate_params);
+ else
+ status = gatepeterson_open(
+ &platform_mqt_gate_handle_sysm3,
+ &gate_params);
+ } while (status == -ENXIO);
+
+ if (status < 0) {
+ status = SYSMGR_E_FAIL;
+ goto gatepeterson_open_fail;
+ }
+
+ do {
+ nread = sysmgr_get_object_config(proc_id,
+ (void *) &pmqt_params,
+ PLATFORM_CMD_MQT,
+ sizeof(struct \
+ platform_messageq_transportshm_params));
+ } while (nread != sizeof(
+ struct platform_messageq_transportshm_params));
+ /* Register this heap with platform_messageq */
+ if (index == SMHEAP_SRINDEX_APPM3)
+ messageq_register_heap(platform_heap_handle_appm3,
+ APPM3HEAPID);
+ else
+ messageq_register_heap(platform_heap_handle_sysm3,
+ SYSM3HEAPID);
+ sh_addr = (u32) sharedregion_get_ptr((u32 *)
+ pmqt_params.shared_mem_addr);
+ if (sh_addr == (u32)NULL) {
+ status = SYSMGR_E_FAIL;
+ goto sharedregion_getptr_fail;
+ }
+ messageq_transportshm_params_init(NULL, &msgqt_params);
+ msgqt_params.shared_addr = (void *) sh_addr;
+ msgqt_params.notify_event_no = pmqt_params.notify_event_no;
+ msgqt_params.notify_driver = platform_notifydrv_handle;
+ msgqt_params.shared_addr_size = pmqt_params.shared_mem_size;
+ if (index == SMHEAP_SRINDEX_APPM3) {
+ msgqt_params.gate = platform_mqt_gate_handle_appm3;
+ platform_transport_shm_handle_appm3 =
+ messageq_transportshm_create(
+ proc_id,
+ &msgqt_params);
+ if (platform_transport_shm_handle_appm3 == NULL) {
+ status = SYSMGR_E_FAIL;
+ goto messageq_transportshm_create_fail;
+ }
+ } else {
+ msgqt_params.gate = platform_mqt_gate_handle_sysm3;
+ platform_transport_shm_handle_sysm3 =
+ messageq_transportshm_create(
+ proc_id,
+ &msgqt_params);
+ if (platform_transport_shm_handle_sysm3 == NULL) {
+ status = SYSMGR_E_FAIL;
+ goto messageq_transportshm_create_fail;
+ }
+
+ }
+ }
+
+ if (status >= 0) {
+ /* Wait for slave to complete the setup */
+ sysmgr_wait_for_slave_setup(proc_id);
+
+ /* Now get the Shared region entries that may have been created
+ * by Slave, but actual physical memory is not assigned to
+ * those entries, only virtual DSP memory exists.
+ */
+ sharedregion_get_config(&sr_config);
+ for (i = 0; i < sr_config.max_regions; i++) {
+ cmd_id = SYSMGR_CMD_SHAREDREGION_ENTRY_START + i;
+ nread = sysmgr_get_object_config(proc_id,
+ (void *) &info, cmd_id,
+ sizeof(struct sharedregion_info));
+ if (nread == sizeof(struct sharedregion_info)) {
+ /* FIXME: Do the DMM and convert the entry into
+ * kernel virtual address and put it in the
+ * shared region for host */
+ }
+ }
+ }
+ goto exit;
+
+messageq_transportshm_create_fail:
+ printk(KERN_ERR "platform_start_callback: "
+ "messageq_transportshm_create failed status[0x%x]", status);
+ goto exit;
+heapbuf_open_fail:
+ printk(KERN_ERR "platform_start_callback: gatepeterson_open "
+ "failed status[0x%x]", status);
+ goto exit;
+nameserver_remotenotify_create_fail:
+ printk(KERN_ERR "platform_start_callback: "
+ "nameserver_remotenotify_create failed status[0x%x]", status);
+ goto exit;
+gatepeterson_open_fail:
+ printk(KERN_ERR "platform_start_callback: gatepeterson_open "
+ "failed status[0x%x]", status);
+ goto exit;
+notify_ducatidrv_create_fail:
+ printk(KERN_ERR "platform_start_callback: notify_ducatidrv_create "
+ "failed status[0x%x]", status);
+ goto exit;
+sharedregion_getptr_fail:
+ printk(KERN_ERR "platform_start_callback: sharedregion_get_ptr failed"
+ " status[0x%x]", status);
+ goto exit;
+multiproc_fail:
+ printk(KERN_ERR "platform_start_callback: multiproc_set_local_id failed"
+ " status[0x%x]", status);
+proc_invalid_id:
+ printk(KERN_ERR "platform_load_callback failed invalid"
+ " proc_id [0x%x]\n", proc_id);
+exit:
+ return;
+}
+EXPORT_SYMBOL(platform_start_callback);
+/* FIXME: since application has to call this API for now */
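+
+/*
+ * Host-side start sequence implemented above, as a summary sketch (each
+ * driver step is guarded by the corresponding pc_params.use_* flag):
+ *
+ *	sysmgr_wait_for_scalability_info(proc_id);
+ *	read SYSMGR_CMD_SCALABILITY           -> pc_params
+ *	read PLATFORM_CMD_NOTIFYDRIVER        -> notify_ducatidrv_create()
+ *	read PLATFORM_CMD_GPNSRN, _NSRN       -> gatepeterson_open() +
+ *	                                         nameserver_remotenotify_create()
+ *	read PLATFORM_CMD_GPHEAPBUF, _HEAPBUF -> gatepeterson_open() +
+ *	                                         heapbuf_open()
+ *	read PLATFORM_CMD_GPMQT, _MQT         -> gatepeterson_open() +
+ *	                                         messageq_register_heap() +
+ *	                                         messageq_transportshm_create()
+ *	sysmgr_wait_for_slave_setup(proc_id);
+ *	read the slave-created SYSMGR_CMD_SHAREDREGION_ENTRY_START + i entries
+ */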
+
+
+/*
+ * ======== platform_stop_callback ========
+ * Purpose:
+ * Function called by proc_mgr when slave is in stopped state.
+ * FIXME: logic would change completely in the final system.
+ */
+void platform_stop_callback(void *arg)
+{
+ s32 status = 0;
+ u16 proc_id = (u32) arg;
+ int index = 0;
+ u32 nread = 0;
+
+ if (proc_id == multiproc_get_id("SysM3"))
+ index = SMHEAP_SRINDEX_SYSM3;
+ else if (proc_id == multiproc_get_id("AppM3"))
+ index = SMHEAP_SRINDEX_APPM3;
+ else {
+ status = SYSMGR_E_FAIL;
+ goto proc_invalid_id;
+ }
+
+ /* Read the scalability info */
+ do {
+ nread = sysmgr_get_object_config(proc_id, (void *) &pc_params,
+ SYSMGR_CMD_SCALABILITY,
+ sizeof(struct sysmgr_proc_config));
+ } while (nread != sizeof(struct sysmgr_proc_config));
+
+ if (pc_params.use_messageq) {
+ /* Finalize drivers */
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = gatepeterson_close(
+ &platform_mqt_gate_handle_appm3);
+ else
+ status = gatepeterson_close(
+ &platform_mqt_gate_handle_sysm3);
+ if (status < 0) {
+ printk(KERN_ERR "platform_stop_callback : mqt "
+ "gatepeterson_close failed [0x%x]", status);
+ }
+
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = messageq_transportshm_delete(
+ &platform_transport_shm_handle_appm3);
+ else
+ status = messageq_transportshm_delete(
+ &platform_transport_shm_handle_sysm3);
+ if (status < 0) {
+ printk(KERN_ERR "platform_stop_callback : "
+ "messageq_transportshm_delete failed [0x%x]",
+ status);
+ }
+ }
+
+ if (pc_params.use_nameserver) {
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = gatepeterson_close(
+ &platform_nsrn_gate_handle_appm3);
+ else
+ status = gatepeterson_close(
+ &platform_nsrn_gate_handle_sysm3);
+ if (status < 0) {
+ printk(KERN_ERR "platform_stop_callback : nsrn"
+ "gatepeterson_close failed [0x%x]", status);
+ }
+
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = nameserver_remotenotify_delete(
+ &platform_nsrn_handle_appm3);
+ else
+ status = nameserver_remotenotify_delete(
+ &platform_nsrn_handle_sysm3);
+ if (status < 0) {
+ printk(KERN_ERR "platform_stop_callback : "
+ "nameserver_remotenotify_delete failed [0x%x]",
+ status);
+ }
+ }
+
+ if (pc_params.use_heapbuf) {
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = messageq_unregister_heap(APPM3HEAPID);
+ else
+ status = messageq_unregister_heap(SYSM3HEAPID);
+ if (status < 0) {
+ printk(KERN_ERR "platform_stop_callback : "
+ "messageq_unregister_heap failed [0x%x]",
+ status);
+ }
+
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = heapbuf_close(platform_heap_handle_appm3);
+ else
+ status = heapbuf_close(platform_heap_handle_sysm3);
+ if (status < 0) {
+ printk(KERN_ERR "platform_stop_callback : "
+ "heapbuf_close failed [0x%x]", status);
+ }
+ if (index == SMHEAP_SRINDEX_APPM3)
+ status = gatepeterson_close(
+ &platform_heap_gate_handle_appm3);
+ else
+ status = gatepeterson_close(
+ &platform_heap_gate_handle_sysm3);
+ if (status < 0) {
+ printk(KERN_ERR "platform_stop_callback : heap"
+ "gatepeterson_close failed [0x%x]", status);
+ }
+
+ }
+
+ if (pc_params.use_notify) {
+ if (index == SMHEAP_SRINDEX_APPM3)
+ platform_notifydrv_handle_appm3 = NULL;
+ else
+ platform_notifydrv_handle_sysm3 = NULL;
+
+ if (platform_notifydrv_handle_sysm3 == NULL &&
+ platform_notifydrv_handle_appm3 == NULL) {
+ status = notify_ducatidrv_delete(
+ (struct notify_driver_object **)
+ &platform_notifydrv_handle);
+ platform_notifydrv_handle = NULL;
+ }
+ if (status < 0) {
+ printk(KERN_ERR "platform_stop_callback : "
+ "notify_ducatidrv_delete failed [0x%x]",
+ status);
+ }
+ }
+
+ status = sharedregion_remove(SMHEAP_SRINDEX_APPM3);
+ if (status < 0) {
+ printk(KERN_ERR "platform_stop_callback : "
+ "sharedregion_remove failed [0x%x]", status);
+ }
+
+ status = sharedregion_remove(SMHEAP_SRINDEX_SYSM3);
+ if (status < 0) {
+ printk(KERN_ERR "platform_stop_callback : "
+ "sharedregion_remove failed [0x%x]", status);
+ }
+
+ goto exit;
+
+proc_invalid_id:
+ printk(KERN_ERR "platform_load_callback failed invalid"
+ " proc_id [0x%x]\n", proc_id);
+exit:
+ return;
+}
+EXPORT_SYMBOL(platform_stop_callback);
+/* FIXME: since application has to call this API for now */
diff --git a/drivers/dsp/syslink/multicore_ipc/platform_mem.c b/drivers/dsp/syslink/multicore_ipc/platform_mem.c
new file mode 100644
index 000000000000..025605b8295c
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/platform_mem.c
@@ -0,0 +1,288 @@
+/*
+ * platform_mem.c
+ *
+ * Target memory management interface implementation.
+ *
+ * This abstracts the memory management interface in the kernel
+ * code. Allocation, freeing, copying and address translation are
+ * supported for kernel memory management.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Linux specific header files */
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <platform_mem.h>
+#include <atomic_linux.h>
+
+/* Macro to make a correct module magic number with ref_count */
+#define PLATFORM_MEM_MAKE_MAGICSTAMP(x) ((PLATFORM_MEM_MODULEID << 12u) | (x))
+
+/*
+ * Structure describing one mapped memory region entry
+ */
+struct platform_mem_map_table_info {
+ struct list_head mem_entry; /* Pointer to mem_entry entry */
+ u32 physical_address; /* Actual address */
+ u32 knl_virtual_address; /* Mapped address */
+ u32 size; /* Size of the region mapped */
+};
+
+/*
+ * Structure defining state object of system memory manager
+ */
+struct platform_mem_module_object {
+ atomic_t ref_count; /* Reference count */
+ struct list_head map_table; /* Head of map table */
+ struct mutex *gate; /* Pointer to lock */
+};
+
+
+/*
+ * Object containing state of the platform mem module
+ */
+static struct platform_mem_module_object platform_mem_state;
+
+/*
+ * ======== platform_mem_setup ========
+ * Purpose:
+ * This will initialize the platform mem module.
+ */
+int platform_mem_setup(void)
+{
+ s32 retval = 0;
+
+ atomic_cmpmask_and_set(&platform_mem_state.ref_count,
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&platform_mem_state.ref_count)
+ != PLATFORM_MEM_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ /* Create the Gate handle */
+ platform_mem_state.gate =
+ kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (platform_mem_state.gate == NULL) {
+ retval = -ENOMEM;
+ goto gate_create_fail;
+ }
+
+ /* Construct the map table */
+ INIT_LIST_HEAD(&platform_mem_state.map_table);
+ mutex_init(platform_mem_state.gate);
+ goto exit;
+
+gate_create_fail:
+ atomic_set(&platform_mem_state.ref_count,
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0));
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(platform_mem_setup);
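+
+/*
+ * Illustration of the magic-stamp reference counting used above (the module
+ * ID value here is hypothetical): with PLATFORM_MEM_MODULEID == 0x01 the
+ * stamp base is 0x1000, so the first platform_mem_setup() call sees
+ * atomic_inc_return() == 0x1001 and performs the real initialization, while
+ * every later call just bumps the count and returns 1.
+ */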
+
+/*
+ * ======== platform_mem_destroy ========
+ * Purpose:
+ * This will finalize the platform mem module.
+ */
+int platform_mem_destroy(void)
+{
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(platform_mem_state.ref_count),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (atomic_dec_return(&platform_mem_state.ref_count)
+ == PLATFORM_MEM_MAKE_MAGICSTAMP(0)) {
+ list_del(&platform_mem_state.map_table);
+ /* Delete the gate handle */
+ kfree(platform_mem_state.gate);
+ }
+
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(platform_mem_destroy);
+
+/*
+ * ======== platform_mem_map ========
+ * Purpose:
+ * This will map a memory area into kernel virtual space.
+ */
+int platform_mem_map(memory_map_info *map_info)
+{
+ int retval = 0;
+ struct platform_mem_map_table_info *info = NULL;
+
+ if (atomic_cmpmask_and_lt(&(platform_mem_state.ref_count),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(map_info == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+
+ }
+
+ if (map_info->src == (u32) NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ info = kmalloc(sizeof(struct platform_mem_map_table_info),
+ GFP_KERNEL);
+ if (info == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(platform_mem_state.gate);
+ if (retval)
+ goto lock_fail;
+
+ map_info->dst = 0;
+ if (map_info->is_cached == true)
+ map_info->dst = (u32) ioremap((dma_addr_t)
+ (map_info->src), map_info->size);
+ else
+ map_info->dst = (u32) ioremap_nocache((dma_addr_t)
+ (map_info->src), map_info->size);
+
+ if (map_info->dst == 0) {
+ retval = -EFAULT;
+ goto ioremap_fail;
+ }
+
+ /* Populate the info */
+ info->physical_address = map_info->src;
+ info->knl_virtual_address = map_info->dst;
+ info->size = map_info->size;
+ /* Put the info into the list */
+ list_add(&info->mem_entry, &platform_mem_state.map_table);
+ mutex_unlock(platform_mem_state.gate);
+ goto exit;
+
+ioremap_fail:
+ mutex_unlock(platform_mem_state.gate);
+lock_fail:
+ kfree(info);
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(platform_mem_map);
+
+/*
+ * ======== platform_mem_unmap ========
+ * Purpose:
+ * This will unmap a memory area from kernel virtual space.
+ */
+int platform_mem_unmap(memory_unmap_info *unmap_info)
+{
+ s32 retval = 0;
+ struct platform_mem_map_table_info *info = NULL;
+
+
+ if (atomic_cmpmask_and_lt(&(platform_mem_state.ref_count),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (unmap_info == NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (unmap_info->addr == (u32) NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(platform_mem_state.gate);
+ if (retval)
+ goto exit;
+
+ iounmap((unsigned int *) unmap_info->addr);
+ /* Delete the node in the map table */
+ list_for_each_entry(info, &platform_mem_state.map_table, mem_entry) {
+ if (info->knl_virtual_address == unmap_info->addr) {
+ list_del(&info->mem_entry);
+ kfree(info);
+ break;
+ }
+ }
+ mutex_unlock(platform_mem_state.gate);
+
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(platform_mem_unmap);
+
+/*
+ * ======== platform_mem_translate ========
+ * Purpose:
+ * This will translate an address between kernel virtual and physical.
+ */
+void *platform_mem_translate(void *src_addr, enum memory_xlt_flags flags)
+{
+ void *buf = NULL;
+ struct platform_mem_map_table_info *tinfo = NULL;
+ u32 frm_addr;
+ u32 to_addr;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(platform_mem_state.ref_count),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(platform_mem_state.gate);
+ if (retval)
+ goto exit;
+
+ /* Traverse to the node in the map table */
+ list_for_each_entry(tinfo, &platform_mem_state.map_table, mem_entry) {
+ frm_addr = (flags == PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS) ?
+ tinfo->knl_virtual_address : tinfo->physical_address;
+ to_addr = (flags == PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS) ?
+ tinfo->physical_address : tinfo->knl_virtual_address;
+ if ((((u32) src_addr) >= frm_addr)
+ && (((u32) src_addr) < (frm_addr + tinfo->size))) {
+ buf = (void *) (to_addr + ((u32)src_addr - frm_addr));
+ break;
+ }
+ }
+ mutex_unlock(platform_mem_state.gate);
+
+exit:
+ return buf;
+}
+EXPORT_SYMBOL(platform_mem_translate);
+
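+/*
+ * Usage sketch (illustrative; the pointer is assumed to lie inside a region
+ * previously registered through platform_mem_map()):
+ *
+ *	void *phys = platform_mem_translate(knl_virt_ptr,
+ *				PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS);
+ *	if (phys == NULL)
+ *		; /* address not covered by any mapped region */
+ */
+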
diff --git a/drivers/dsp/syslink/multicore_ipc/platformcfg.c b/drivers/dsp/syslink/multicore_ipc/platformcfg.c
new file mode 100755
index 000000000000..360b275f0f43
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/platformcfg.c
@@ -0,0 +1,91 @@
+/*
+ * platformcfg.c
+ *
+ * Implementation of platform specific configuration for Syslink.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+
+/* Utilities headers */
+#include <linux/string.h>
+
+
+/* Module headers */
+#include <sysmgr.h>
+
+/* =============================================================================
+ * APIS
+ * =============================================================================
+ */
+/*
+ * ======== platform_override_config ========
+ * Purpose:
+ * Function to override the default configuration values.
+ */
+int platform_override_config(struct sysmgr_config *config)
+{
+ int status = 0;
+
+ if (WARN_ON(config == NULL)) {
+ status = -EINVAL;
+ goto failure;
+ }
+
+ /* Override the multiproc default config */
+ config->multiproc_cfg.max_processors = 4;
+ config->multiproc_cfg.id = 0;
+ strcpy(config->multiproc_cfg.name_list[0], "MPU");
+ strcpy(config->multiproc_cfg.name_list[1], "Tesla");
+ strcpy(config->multiproc_cfg.name_list[2], "SysM3");
+ strcpy(config->multiproc_cfg.name_list[3], "AppM3");
+
+ /* Override the gatepeterson default config */
+
+ /* Override the sharedregion default config */
+ config->sharedregion_cfg.gate_handle = NULL;
+ config->sharedregion_cfg.heap_handle = NULL;
+ config->sharedregion_cfg.max_regions = 4;
+
+ /* Override the listmp default config */
+
+ /* Override the messageq default config */
+ /* We use 2 heaps, 1 for APPM3 and 1 for SYSM3 */
+ /* FIXME: Temporary Fix - Add one more for the SW DMM heap */
+ if (config->messageq_cfg.num_heaps < 3)
+ config->messageq_cfg.num_heaps = 3;
+
+ /* Override the notify default config */
+ config->notify_cfg.maxDrivers = 2;
+
+ /* Override the procmgr default config */
+
+ /* Override the heapbuf default config */
+
+ /* Override the listmp_sharedmemory default config */
+
+ /* Override the messageq_transportshm default config */
+
+ /* Override the notify ducati driver default config */
+
+ /* Override the nameserver remotenotify default config */
+ goto success;
+
+failure:
+ printk(KERN_ERR "platform_override_config failed [0x%x]", status);
+success:
+ return status;
+}
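+
+/*
+ * Illustration (assuming multiproc assigns processor ids by position in
+ * name_list, which is not confirmed here): the override above would yield
+ * MPU = 0, Tesla = 1, SysM3 = 2 and AppM3 = 3, with id 0 (MPU) being the
+ * local host processor.
+ */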
diff --git a/drivers/dsp/syslink/multicore_ipc/sharedregion.c b/drivers/dsp/syslink/multicore_ipc/sharedregion.c
new file mode 100755
index 000000000000..cfcd1aa1ee53
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sharedregion.c
@@ -0,0 +1,800 @@
+/*
+ * sharedregion.c
+ *
+ * The SharedRegion module is designed to be used in a
+ * multi-processor environment where there are memory regions
+ * that are shared and accessed across different processors
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <syslink/atomic_linux.h>
+
+#include <multiproc.h>
+#include <nameserver.h>
+#include <sharedregion.h>
+
+/* Macro to make a correct module magic number with refCount */
+#define SHAREDREGION_MAKE_MAGICSTAMP(x) ((SHAREDREGION_MODULEID << 16u) | (x))
+
+#define SHAREDREGION_MAX_REGIONS_DEFAULT 4
+
+/*
+ * Module state object
+ */
+struct sharedregion_module_object {
+ atomic_t ref_count; /* Reference count */
+ struct mutex *gate_handle;
+ struct sharedregion_info *table; /* Ptr to the table */
+ u32 bitOffset; /* Index bit offset */
+ u32 region_size; /* Max size of each region */
+ struct sharedregion_config cfg; /* Current config values */
+ u32 *ref_count_table; /* The number of times each
+ entry has been added */
+};
+
+/*
+ * Shared region state object variable with default settings
+ */
+static struct sharedregion_module_object sharedregion_state = {
+ .cfg.heap_handle = NULL,
+ .cfg.gate_handle = NULL,
+ .cfg.max_regions = SHAREDREGION_MAX_REGIONS_DEFAULT
+};
+
+/*
+ * ======== sharedregion_get_config ========
+ * Purpose:
+ * This will get the sharedregion module configuration
+ */
+int sharedregion_get_config(struct sharedregion_config *config)
+{
+ BUG_ON((config == NULL));
+ memcpy(config, &sharedregion_state.cfg,
+ sizeof(struct sharedregion_config));
+ return 0;
+}
+EXPORT_SYMBOL(sharedregion_get_config);
+
+
+/*
+ * ======== sharedregion_get_bitoffset ========
+ * Purpose:
+ * This will get the bit offset
+ */
+static u32 sharedregion_get_bitoffset(u32 max_regions)
+{
+ s32 i; /* signed so the countdown loop can terminate */
+ u32 bitoffset = 0;
+ for (i = ((sizeof(void *) * 8) - 1); i >= 0; i--) {
+ if (max_regions > (1 << i))
+ break;
+ }
+
+ bitoffset = (((sizeof(void *) * 8) - 1) - i);
+ return bitoffset;
+}
+
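+/*
+ * Worked example for the helper above, on a 32-bit kernel: with
+ * max_regions = 4 the loop breaks at i = 1 (4 > (1 << 1)), giving
+ * bitoffset = 30 and hence region_size = 1 << 30 (0x40000000), i.e. a
+ * shared-region index can be carried in the top two bits of a pointer.
+ */
+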
+/*
+ * ======== sharedregion_setup ========
+ * Purpose:
+ * This will set up the sharedregion module
+ */
+int sharedregion_setup(const struct sharedregion_config *config)
+{
+ struct sharedregion_config *tmpcfg = &sharedregion_state.cfg;
+ struct sharedregion_info *table = NULL;
+ u32 i;
+ u32 j;
+ s32 retval = 0;
+ u16 proc_count;
+
+ /* This sets the ref_count variable if it is not initialized; the upper
+ * 16 bits are written with the module ID to ensure correctness of the
+ * ref_count variable.
+ */
+ atomic_cmpmask_and_set(&sharedregion_state.ref_count,
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&sharedregion_state.ref_count)
+ != SHAREDREGION_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (config != NULL) {
+ if (WARN_ON(config->max_regions == 0)) {
+ retval = -EINVAL;
+ goto error;
+ }
+ memcpy(&sharedregion_state.cfg, config,
+ sizeof(struct sharedregion_config));
+ }
+
+ sharedregion_state.gate_handle = kmalloc(sizeof(struct mutex),
+ GFP_KERNEL);
+ if (sharedregion_state.gate_handle == NULL) {
+ retval = -ENOMEM;
+ goto gate_create_fail;
+ }
+
+ sharedregion_state.bitOffset =
+ sharedregion_get_bitoffset(tmpcfg->max_regions);
+ sharedregion_state.region_size = (1 << sharedregion_state.bitOffset);
+ proc_count = multiproc_get_max_processors();
+ /* TODO check heap usage & + 1 ? */
+ sharedregion_state.table = kmalloc(sizeof(struct sharedregion_info) *
+ tmpcfg->max_regions * (proc_count + 1),
+ GFP_KERNEL);
+ if (sharedregion_state.table == NULL) {
+ retval = -ENOMEM;
+ goto table_alloc_fail;
+ }
+
+ sharedregion_state.ref_count_table = kmalloc(sizeof(u32) *
+ tmpcfg->max_regions * (proc_count + 1),
+ GFP_KERNEL);
+ if (sharedregion_state.ref_count_table == NULL) {
+ retval = -ENOMEM;
+ goto table_alloc_fail;
+ }
+
+ table = sharedregion_state.table;
+ for (i = 0; i < tmpcfg->max_regions; i++) {
+ for (j = 0; j < (proc_count + 1); j++) {
+ (table + (j * tmpcfg->max_regions) + i)->is_valid =
+ false;
+ (table + (j * tmpcfg->max_regions) + i)->base = 0;
+ (table + (j * tmpcfg->max_regions) + i)->len = 0;
+ sharedregion_state.ref_count_table[(j *
+ tmpcfg->max_regions) + i] = 0;
+ }
+ }
+
+ mutex_init(sharedregion_state.gate_handle);
+ return 0;
+
+table_alloc_fail:
+ /* kfree(NULL) is a no-op, so this covers both allocation failure paths */
+ kfree(sharedregion_state.table);
+ kfree(sharedregion_state.gate_handle);
+
+gate_create_fail:
+ memset(&sharedregion_state, 0,
+ sizeof(struct sharedregion_module_object));
+ sharedregion_state.cfg.max_regions = SHAREDREGION_MAX_REGIONS_DEFAULT;
+
+error:
+ printk(KERN_ERR "sharedregion_setup failed status:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_setup);
+
+/*
+ * ======== sharedregion_destroy ========
+ * Purpose:
+ * This will destroy the sharedregion module
+ */
+int sharedregion_destroy(void)
+{
+ s32 retval = 0;
+ void *gate_handle = NULL;
+
+ if (atomic_cmpmask_and_lt(&(sharedregion_state.ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (!(atomic_dec_return(&sharedregion_state.ref_count)
+ == SHAREDREGION_MAKE_MAGICSTAMP(0))) {
+ retval = 1; /* Syslink is not handling this on 2.0.0.06 */
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(sharedregion_state.gate_handle);
+ if (retval)
+ goto error;
+
+ kfree(sharedregion_state.ref_count_table);
+ kfree(sharedregion_state.table);
+ gate_handle = sharedregion_state.gate_handle; /* backup gate handle */
+ memset(&sharedregion_state, 0,
+ sizeof(struct sharedregion_module_object));
+ sharedregion_state.cfg.max_regions = SHAREDREGION_MAX_REGIONS_DEFAULT;
+ mutex_unlock(gate_handle);
+ kfree(gate_handle);
+ return 0;
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_destroy failed status:%x\n",
+ retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_destroy);
+
+/*
+ * ======== sharedregion_add ========
+ * Purpose:
+ * This adds a memory segment to the lookup table at runtime,
+ * identified by its base address and length
+ */
+int sharedregion_add(u32 index, void *base, u32 len)
+{
+ struct sharedregion_info *entry = NULL;
+ struct sharedregion_info *table = NULL;
+ s32 retval = 0;
+ u32 i;
+ u16 myproc_id;
+ bool overlap = false;
+ bool same = false;
+
+ if (atomic_cmpmask_and_lt(&(sharedregion_state.ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (index >= sharedregion_state.cfg.max_regions ||
+ sharedregion_state.region_size < len) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ myproc_id = multiproc_get_id(NULL);
+ retval = mutex_lock_interruptible(sharedregion_state.gate_handle);
+ if (retval)
+ goto error;
+
+
+ table = sharedregion_state.table;
+ /* Check for overlap */
+ for (i = 0; i < sharedregion_state.cfg.max_regions; i++) {
+ entry = (table
+ + (myproc_id * sharedregion_state.cfg.max_regions)
+ + i);
+ if (entry->is_valid) {
+ /* Handle duplicate entry */
+ if ((base == entry->base) && (len == entry->len)) {
+ same = true;
+ break;
+ }
+
+ if ((base >= entry->base) &&
+ (base < (void *)((u32)entry->base + entry->len))) {
+ overlap = true;
+ break;
+ }
+
+ if ((base < entry->base) &&
+ (void *)((u32)base + len) >= entry->base) {
+ overlap = true;
+ break;
+ }
+ }
+ }
+
+ if (same) {
+ retval = 1;
+ goto success;
+ }
+
+ if (overlap) {
+ /* FHACK: FIX ME */
+ retval = 1;
+ goto mem_overlap_error;
+ }
+
+ entry = (table
+ + (myproc_id * sharedregion_state.cfg.max_regions)
+ + index);
+ if (entry->is_valid == false) {
+ entry->base = base;
+ entry->len = len;
+ entry->is_valid = true;
+
+ } else {
+ /* FHACK: FIX ME */
+ sharedregion_state.ref_count_table[(myproc_id *
+ sharedregion_state.cfg.max_regions)
+ + index] += 1;
+ retval = 1;
+ goto dup_entry_error;
+ }
+
+success:
+ mutex_unlock(sharedregion_state.gate_handle);
+ return 0;
+
+dup_entry_error: /* Fall through */
+mem_overlap_error:
+ printk(KERN_WARNING "sharedregion_add entry exists status: %x\n",
+ retval);
+ mutex_unlock(sharedregion_state.gate_handle);
+
+error:
+ if (retval < 0)
+ printk(KERN_ERR "sharedregion_add failed status:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_add);
+
+/*
+ * ======== sharedregion_remove ========
+ * Purpose:
+ * This removes a memory segment from the lookup table
+ * at runtime by index
+ */
+int sharedregion_remove(u32 index)
+{
+ struct sharedregion_info *entry = NULL;
+ struct sharedregion_info *table = NULL;
+ u16 myproc_id;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(sharedregion_state.ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (index >= sharedregion_state.cfg.max_regions) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(sharedregion_state.gate_handle);
+ if (retval)
+ goto error;
+
+ myproc_id = multiproc_get_id(NULL);
+ table = sharedregion_state.table;
+ entry = (table
+ + (myproc_id * sharedregion_state.cfg.max_regions)
+ + index);
+
+ if (sharedregion_state.ref_count_table[(myproc_id *
+ sharedregion_state.cfg.max_regions)
+ + index] > 0)
+ sharedregion_state.ref_count_table[(myproc_id *
+ sharedregion_state.cfg.max_regions)
+ + index] -= 1;
+ else {
+ entry->is_valid = false;
+ entry->base = NULL;
+ entry->len = 0;
+ }
+ mutex_unlock(sharedregion_state.gate_handle);
+ return 0;
+
+error:
+ printk(KERN_ERR "sharedregion_remove failed status:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_remove);
+
+/*
+ * ======== sharedregion_get_index ========
+ * Purpose:
+ * This will return the index for the specified address pointer.
+ */
+int sharedregion_get_index(void *addr)
+{
+ struct sharedregion_info *entry = NULL;
+ struct sharedregion_info *table = NULL;
+ bool found = false;
+ u32 i;
+ u16 myproc_id;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_state.ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ myproc_id = multiproc_get_id(NULL);
+ retval = mutex_lock_interruptible(sharedregion_state.gate_handle);
+ if (retval) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ table = sharedregion_state.table;
+ for (i = 0; i < sharedregion_state.cfg.max_regions; i++) {
+ entry = (table
+ + (myproc_id * sharedregion_state.cfg.max_regions)
+ + i);
+ if ((addr >= entry->base) &&
+ (addr < (void *)((u32)entry->base + (entry->len)))) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ retval = i;
+ else
+ retval = -ENOENT; /* No entry found in the table */
+
+ mutex_unlock(sharedregion_state.gate_handle);
+ return retval;
+
+exit:
+ printk(KERN_ERR "sharedregion_get_index failed index:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_get_index);
+
+/*
+ * ======== sharedregion_get_ptr ========
+ * Purpose:
+ * This will return the address pointer associated with the
+ * shared region pointer
+ */
+void *sharedregion_get_ptr(u32 *srptr)
+{
+ struct sharedregion_info *entry = NULL;
+ void *ptr = NULL;
+ u16 myproc_id;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(sharedregion_state.ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (srptr == SHAREDREGION_INVALIDSRPTR)
+ goto error;
+
+ myproc_id = multiproc_get_id(NULL);
+ retval = mutex_lock_interruptible(sharedregion_state.gate_handle);
+ if (WARN_ON(retval != 0))
+ goto error;
+
+ entry = (sharedregion_state.table
+ + (myproc_id * sharedregion_state.cfg.max_regions)
+ + ((u32)srptr >> sharedregion_state.bitOffset));
+ /* TODO: check whether this is correct */
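+ /* srptr decoding: the bits above bitOffset selected the region entry
+ * above; the low bitOffset bits are the byte offset within the region
+ * and are added to the region base address below. */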
+ ptr = ((void *)(((u32)srptr &
+ ((1 << sharedregion_state.bitOffset) - 1)) + (u32)entry->base));
+ mutex_unlock(sharedregion_state.gate_handle);
+ return ptr;
+
+error:
+ printk(KERN_ERR "sharedregion_get_ptr failed\n");
+ return (void *)NULL;
+
+}
+EXPORT_SYMBOL(sharedregion_get_ptr);
+
+/*
+ * ======== sharedregion_get_srptr ========
+ * Purpose:
+ * This returns the shared region pointer associated with an
+ * address in a shared region area registered with the
+ * sharedregion module
+ */
+u32 *sharedregion_get_srptr(void *addr, s32 index)
+{
+ struct sharedregion_info *entry = NULL;
+ u32 *ptr = SHAREDREGION_INVALIDSRPTR;
+ u32 myproc_id;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(sharedregion_state.ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(addr == NULL))
+ goto error;
+
+ if (WARN_ON(index >= sharedregion_state.cfg.max_regions))
+ goto error;
+
+ retval = mutex_lock_interruptible(sharedregion_state.gate_handle);
+ if (WARN_ON(retval != 0))
+ goto error;
+
+ myproc_id = multiproc_get_id(NULL);
+ entry = (sharedregion_state.table
+ + (myproc_id * sharedregion_state.cfg.max_regions)
+ + index);
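+ /* srptr encoding: place the region index in the bits above bitOffset
+ * and the offset of addr from the region base in the low bits. */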
+ ptr = (u32 *) ((index << sharedregion_state.bitOffset)
+ | ((u32)addr - (u32)entry->base));
+ mutex_unlock(sharedregion_state.gate_handle);
+ return ptr;
+
+error:
+ printk(KERN_ERR "sharedregion_get_srptr failed\n");
+ return (u32 *)NULL;
+}
+EXPORT_SYMBOL(sharedregion_get_srptr);
+
+/*
+ * ======== sharedregion_get_table_info ========
+ * Purpose:
+ * This will get the table entry information for the
+ * specified index and id
+ */
+int sharedregion_get_table_info(u32 index, u16 proc_id,
+ struct sharedregion_info *info)
+{
+ struct sharedregion_info *entry = NULL;
+ struct sharedregion_info *table = NULL;
+ u16 proc_count;
+ s32 retval = 0;
+
+ BUG_ON(info == NULL);
+ if (atomic_cmpmask_and_lt(&(sharedregion_state.ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ proc_count = multiproc_get_max_processors();
+ if (index >= sharedregion_state.cfg.max_regions ||
+ proc_id >= proc_count) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(sharedregion_state.gate_handle);
+ if (retval)
+ goto error;
+
+ table = sharedregion_state.table;
+ entry = (table
+ + (proc_id * sharedregion_state.cfg.max_regions)
+ + index);
+ memcpy((void *) info, (void *) entry, sizeof(struct sharedregion_info));
+ mutex_unlock(sharedregion_state.gate_handle);
+ return 0;
+
+error:
+ printk(KERN_ERR "sharedregion_get_table_info failed status:%x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_get_table_info);
+
+/*
+ * ======== sharedregion_set_table_info ========
+ * Purpose:
+ * This will set the table entry information for the
+ * specified index and id
+ */
+int sharedregion_set_table_info(u32 index, u16 proc_id,
+ struct sharedregion_info *info)
+{
+ struct sharedregion_info *entry = NULL;
+ struct sharedregion_info *table = NULL;
+ u16 proc_count;
+ s32 retval = 0;
+
+ BUG_ON(info == NULL);
+ if (atomic_cmpmask_and_lt(&(sharedregion_state.ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ proc_count = multiproc_get_max_processors();
+ if (index >= sharedregion_state.cfg.max_regions ||
+ proc_id >= proc_count) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(sharedregion_state.gate_handle);
+ if (retval)
+ goto error;
+
+ table = sharedregion_state.table;
+ entry = (table
+ + (proc_id * sharedregion_state.cfg.max_regions)
+ + index);
+ memcpy((void *) entry, (void *) info, sizeof(struct sharedregion_info));
+ mutex_unlock(sharedregion_state.gate_handle);
+ return 0;
+
+error:
+ printk(KERN_ERR "sharedregion_set_table_info failed status:%x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_set_table_info);
+
+/*
+ * ======== Sharedregion_attach ========
+ * Purpose:
+ * This attaches the shared region for a proc_id
+ *
+ * The application should call this function from the callback
+ * registered for device attach with the processor manager.
+ * Any module that requires setup logic to run when a device
+ * is attached to the system should export an API like this.
+ * Please see the pseudo code below for an example:
+ * Example
+ * code
+ * void function (proc_id, config) {
+ * NotifyDriver_attach (proc_id, config->ndParams);
+ * SysMemMgr_attach (proc_id);
+ * SMM_attach (proc_id);
+ * NameServerRemoteTransport_attach(proc_id, config->nsrtParams);
+ * SharedRegion_attach (proc_id);
+ * SharedMemory_getConfig (&cfg);
+ * for (i = 0u; i < cfg->maxRegions; i++) {
+ * SharedRegion_getTableInfo(i, &myinfo, myProcId);
+ * SharedRegion_getTableInfo(i, &peerinfo, proc_id);
+ * DMM_map (proc_id,
+ * PA(myinfo->vaddr),
+ * peerinfo->vaddr,
+ * myinfo->len);
+ * }
+ * ...
+ * }
+ *
+ * main () {
+ * # attach callback for device attach only
+ * ProcMgr_register (function, proc_id, DEV_ATTACH);
+ * }
+ *
+ */
+void sharedregion_attach(u16 proc_id)
+{
+ struct sharedregion_info *entry = NULL;
+ struct sharedregion_info *table = NULL;
+ char *hexstr = "0123456789ABCDEF";
+ char tname[80];
+ u16 proc_id_list[2];
+ u32 addr = 0;
+ u32 len;
+ u16 proc_count;
+ void *nshandle;
+ s32 retval = 0;
+ s32 i;
+
+ if (WARN_ON(sharedregion_state.table == NULL))
+ goto error;
+
+ proc_count = multiproc_get_max_processors();
+ if (WARN_ON(proc_id >= proc_count))
+ goto error;
+
+ proc_id_list[0] = proc_id;
+ proc_id_list[1] = MULTIPROC_INVALIDID;
+ nshandle = nameserver_get_handle(SHAREDREGION_NAMESERVER);
+ if (nshandle == NULL)
+ goto error;
+
+ /* Get Shared region entries from the remote shared region nameserver */
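+ /* Keys are built as "SHAREDREGION:SRENTRY_ADDR_<pp>_<iiiiiiii>" and
+ * "SHAREDREGION:SRENTRY_LEN_<pp>_<iiiiiiii>", where <pp> is the remote
+ * proc_id and <iiiiiiii> the region index, both in upper-case hex. */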
+ for (i = 0u; i < sharedregion_state.cfg.max_regions; i++) {
+ memset(tname, 0, 80);
+ strcpy(tname, "SHAREDREGION:SRENTRY_ADDR_");
+ tname[strlen(tname)] = hexstr[(proc_id >> 4u) & 0xF];
+ tname[strlen(tname)] = hexstr[proc_id & 0xF];
+ tname[strlen(tname)] = '_';
+ tname[strlen(tname)] = hexstr[(i >> 28u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 24u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 20u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 16u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 12u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 8u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 4u) & 0xF];
+ tname[strlen(tname)] = hexstr[i & 0xF];
+ retval = nameserver_get(nshandle, tname,
+ &addr, sizeof(u32), &proc_id_list[0]);
+ WARN_ON(retval);
+
+ memset(tname, 0, 80u);
+ strcpy(tname, "SHAREDREGION:SRENTRY_LEN_");
+ tname[strlen(tname)] = hexstr[(proc_id >> 4u) & 0xF];
+ tname[strlen(tname)] = hexstr[proc_id & 0xF];
+ tname[strlen(tname)] = '_';
+ tname[strlen(tname)] = hexstr[(i >> 28u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 24u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 20u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 16u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 12u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 8u) & 0xF];
+ tname[strlen(tname)] = hexstr[(i >> 4u) & 0xF];
+ tname[strlen(tname)] = hexstr[i & 0xF];
+
+ /* TODO: check this */
+ retval = nameserver_get(nshandle, tname,
+ &len, sizeof(u32), &proc_id_list[0]);
+ WARN_ON(retval);
+
+ /* Found an entry in the remote nameserver */
+ /* Add it into the shared region table */
+ if (retval == 0) {
+ retval = mutex_lock_interruptible(
+ sharedregion_state.gate_handle);
+ if (WARN_ON(retval))
+ break;
+
+ table = sharedregion_state.table;
+ /* mark entry invalid */
+ entry = (table
+ + (proc_id * sharedregion_state.cfg.max_regions)
+ + i);
+ entry->base = (void *)addr;
+ entry->len = len;
+ entry->is_valid = false;
+ mutex_unlock(sharedregion_state.gate_handle);
+ }
+
+ }
+
+error:
+ return;
+}
+EXPORT_SYMBOL(sharedregion_attach);
+
+/*
+ * ======== Sharedregion_detach ========
+ * Purpose:
+ * This detaches the shared region for a proc_id
+ *
+ * The application should call this function from the callback
+ * registered for device detach with the processor manager.
+ * Any module that requires cleanup logic to run when a device
+ * is detached from the system should export an API like this.
+ * Please see the pseudo code below for an example:
+ * @Example
+ * @code
+ * void function (proc_id) {
+ * SharedRegion_detach (proc_id);
+ * SysMemMgr_detach (proc_id);
+ * ...
+ * # Name server must be detached last
+ * nameserver_detach (proc_id);
+ * }
+ *
+ * main () {
+ * # attach callback for device detach only
+ * Processor_register (function, proc_id, DEV_DETACH);
+ * }
+ *
+ */
+void sharedregion_detach(u16 proc_id)
+{
+ u16 proc_count;
+
+ if (WARN_ON(sharedregion_state.table == NULL))
+ goto error;
+
+ proc_count = multiproc_get_max_processors();
+ if (WARN_ON(proc_id >= proc_count))
+ goto error;
+
+error:
+ return;
+}
+EXPORT_SYMBOL(sharedregion_detach);
+
diff --git a/drivers/dsp/syslink/multicore_ipc/sharedregion_ioctl.c b/drivers/dsp/syslink/multicore_ipc/sharedregion_ioctl.c
new file mode 100755
index 000000000000..863bb1d4a033
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sharedregion_ioctl.c
@@ -0,0 +1,354 @@
+/*
+ * sharedregion_ioctl.c
+ *
+ * The sharedregion module is designed to be used in a
+ * multi-processor environment where there are memory regions
+ * that are shared and accessed across different processors
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+
+#include <multiproc.h>
+#include <sharedregion.h>
+#include <sharedregion_ioctl.h>
+#include <platform_mem.h>
+
+/*
+ * ======== sharedregion_ioctl_get_config ========
+ * Purpose:
+ * This is the ioctl interface to the sharedregion_get_config function
+ */
+static int sharedregion_ioctl_get_config(struct sharedregion_cmd_args *cargs)
+{
+
+ struct sharedregion_config config;
+ s32 status = 0;
+ s32 size;
+
+ cargs->api_status = sharedregion_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct sharedregion_config));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+
+/*
+ * ======== sharedregion_ioctl_setup ========
+ * Purpose:
+ * This is the ioctl interface to the sharedregion_setup function
+ */
+static int sharedregion_ioctl_setup(struct sharedregion_cmd_args *cargs)
+{
+ struct sharedregion_config config;
+ struct sharedregion_config defaultcfg;
+ struct sharedregion_info info;
+ struct sharedregion_info *table;
+ u32 proc_count = 0;
+ u32 i;
+ u32 j;
+ s32 status = 0;
+ s32 size;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct sharedregion_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = sharedregion_setup(&config);
+ if (cargs->api_status != 0)
+ goto exit;
+
+ cargs->api_status = sharedregion_get_config(&defaultcfg);
+ size = copy_to_user(cargs->args.setup.default_cfg,
+ &defaultcfg,
+ sizeof(struct sharedregion_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ proc_count = multiproc_get_max_processors();
+ table = cargs->args.setup.table;
+ for (i = 0; i < config.max_regions; i++) {
+ for (j = 0; j < (proc_count); j++) {
+ sharedregion_get_table_info(i, j, &info);
+ if (info.is_valid == true) {
+ /* Convert kernel virtual address to physical
+ * addresses */
+ info.base = platform_mem_translate(info.base,
+ PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS);
+ size = copy_to_user((void *) (table
+ + (j * config.max_regions)
+ + i),
+ (void *) &info,
+ sizeof(
+ struct sharedregion_info));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ } /* End of inner if */
+ } /* End of outer if */
+ } /* End of inner for loop */
+ }
+
+exit:
+ return status;
+}
+
+/*
+ * ======== sharedregion_ioctl_destroy ========
+ * Purpose:
+ * This is the ioctl interface to the sharedregion_destroy function
+ */
+static int sharedregion_ioctl_destroy(
+ struct sharedregion_cmd_args *cargs)
+{
+ cargs->api_status = sharedregion_destroy();
+ return 0;
+}
+
+/*
+ * ======== sharedregion_ioctl_add ========
+ * Purpose:
+ * This is the ioctl interface to the sharedregion_add function
+ */
+static int sharedregion_ioctl_add(struct sharedregion_cmd_args *cargs)
+{
+ u32 base = (u32)platform_mem_translate(cargs->args.add.base,
+ PLATFORM_MEM_XLT_FLAGS_PHYS2VIRT);
+ cargs->api_status = sharedregion_add(cargs->args.add.index,
+ (void *)base, cargs->args.add.len);
+ return 0;
+}
+
+
+
+/*
+ * ======== sharedregion_ioctl_get_index ========
+ * Purpose:
+ * This is the ioctl interface to the sharedregion_get_index function
+ */
+static int sharedregion_ioctl_get_index(struct sharedregion_cmd_args *cargs)
+{
+ s32 index = 0;
+
+ index = sharedregion_get_index(cargs->args.get_index.addr);
+ cargs->args.get_index.index = index;
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== sharedregion_ioctl_get_ptr ========
+ * Purpose:
+ * This is the ioctl interface to the sharedregion_get_ptr function
+ */
+static int sharedregion_ioctl_get_ptr(struct sharedregion_cmd_args *cargs)
+{
+ void *addr = NULL;
+
+ addr = sharedregion_get_ptr(cargs->args.get_ptr.srptr);
+ /* We do not check the return value from the module; it is the
+ * user's responsibility to pass a proper value to the application.
+ */
+ cargs->args.get_ptr.addr = addr;
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== sharedregion_ioctl_get_srptr ========
+ * Purpose:
+ * This is the ioctl interface to the sharedregion_get_srptr function
+ */
+static int sharedregion_ioctl_get_srptr(struct sharedregion_cmd_args *cargs)
+{
+ u32 *srptr = NULL;
+
+ srptr = sharedregion_get_srptr(cargs->args.get_srptr.addr,
+ cargs->args.get_srptr.index);
+ /* We do not check the return value from the module; it is the
+ * user's responsibility to pass a proper value to the application.
+ */
+ cargs->args.get_srptr.srptr = srptr;
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== sharedregion_ioctl_get_table_info ========
+ * Purpose:
+ * This is the ioctl interface to the sharedregion_get_table_info function
+ */
+static int sharedregion_ioctl_get_table_info(
+ struct sharedregion_cmd_args *cargs)
+{
+ struct sharedregion_info info;
+ s32 status = 0;
+ s32 size;
+
+ cargs->api_status = sharedregion_get_table_info(
+ cargs->args.get_table_info.index,
+ cargs->args.get_table_info.proc_id, &info);
+ size = copy_to_user(cargs->args.get_table_info.info, &info,
+ sizeof(struct sharedregion_info));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+
+/*
+ * ======== sharedregion_ioctl_remove ========
+ * Purpose:
+ * This is the ioctl interface to the sharedregion_remove function
+ */
+static int sharedregion_ioctl_remove(struct sharedregion_cmd_args *cargs)
+{
+ cargs->api_status = sharedregion_remove(cargs->args.remove.index);
+ return 0;
+}
+
+/*
+ * ======== sharedregion_ioctl_set_table_info ========
+ * Purpose:
+ * This is the ioctl interface to the sharedregion_set_table_info function
+ */
+static int sharedregion_ioctl_set_table_info(
+ struct sharedregion_cmd_args *cargs)
+{
+ struct sharedregion_info info;
+ s32 status = 0;
+ s32 size;
+
+ size = copy_from_user(&info, cargs->args.set_table_info.info,
+ sizeof(struct sharedregion_info));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = sharedregion_set_table_info(
+ cargs->args.set_table_info.index,
+ cargs->args.set_table_info.proc_id, &info);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== sharedregion_ioctl ========
+ * Purpose:
+ * This is the ioctl interface for the sharedregion module
+ */
+int sharedregion_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct sharedregion_cmd_args __user *uarg =
+ (struct sharedregion_cmd_args __user *)args;
+ struct sharedregion_cmd_args cargs;
+
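+ /* _IOC_READ means the kernel writes results back to the user buffer,
+ * hence access_ok(VERIFY_WRITE, ...); _IOC_WRITE means the kernel
+ * reads from it, hence VERIFY_READ. */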
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct sharedregion_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_SHAREDREGION_GETCONFIG:
+ status = sharedregion_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_SETUP:
+ status = sharedregion_ioctl_setup(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_DESTROY:
+ status = sharedregion_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_ADD:
+ status = sharedregion_ioctl_add(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_GETINDEX:
+ status = sharedregion_ioctl_get_index(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_GETPTR:
+ status = sharedregion_ioctl_get_ptr(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_GETSRPTR:
+ status = sharedregion_ioctl_get_srptr(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_GETTABLEINFO:
+ status = sharedregion_ioctl_get_table_info(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_REMOVE:
+ status = sharedregion_ioctl_remove(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_SETTABLEINFO:
+ status = sharedregion_ioctl_set_table_info(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ status = -ERESTARTSYS;
+
+ if (status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct sharedregion_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+}
+
diff --git a/drivers/dsp/syslink/multicore_ipc/sysmemmgr.c b/drivers/dsp/syslink/multicore_ipc/sysmemmgr.c
new file mode 100644
index 000000000000..60bdad0c188f
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sysmemmgr.c
@@ -0,0 +1,459 @@
+/*
+ * sysmemmgr.c
+ *
+ * Manager for the slave system memory. Slave system-level memory is allocated
+ * through this module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+
+/* Utils headers */
+#include <linux/vmalloc.h>
+#include <syslink/atomic_linux.h>
+#include <syslink/platform_mem.h>
+/*#include <GateMutex.h>
+#include <Memory.h>
+#include <Trace.h>*/
+
+
+/* Module level headers */
+#include <sysmemmgr.h>
+/*#include <BuddyPageAllocator.h>*/
+
+
+/* =============================================================================
+ * Macros
+ * =============================================================================
+ */
+/*! @brief Event reserved for System memory manager */
+#define SYSMEMMGR_EVENTNO 12
+
+/* Macro to make a correct module magic number with ref_count */
+#define SYSMEMMGR_MAKE_MAGICSTAMP(x) ((SYSMEMMGR_MODULEID << 12) | (x))
+
+/* =============================================================================
+ * Structs & Enums
+ * =============================================================================
+ */
+/*! @brief Structure containing list of buffers. The list is kept sorted by
+ * address. */
+struct sysmemmgr_static_mem_struct {
+ struct sysmemmgr_static_mem_struct *next;
+ /*!< Pointer to next entry */
+ u32 address;
+ /*!< Address of this entry */
+ u32 size;
+ /*!< Size of this entry */
+};
+
+
+/*! @brief Static memory manager object. */
+struct sysmemmgr_static_mem_mgr_obj {
+ struct sysmemmgr_static_mem_struct head;
+ /*!< Head entry (zero-sized sentinel at the pool start) */
+ struct sysmemmgr_static_mem_struct tail;
+ /*!< Tail entry (zero-sized sentinel at the pool end) */
+};
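+
+/*
+ * The static pool is managed as an address-sorted singly linked list, with
+ * 'head' and 'tail' acting as zero-sized sentinels at the pool boundaries.
+ * sysmemmgr_alloc() scans for the first gap between consecutive nodes that
+ * is large enough (first fit) and links in a new node describing the block;
+ * for blocks inside the static range, sysmemmgr_free() unlinks and frees
+ * that node again.
+ */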
+
+/*!
+ * @brief Structure defining state object of system memory manager.
+ */
+struct sysmemmgr_module_object {
+ atomic_t ref_count;
+ /*!< Reference count */
+ struct sysmemmgr_static_mem_mgr_obj static_mem_obj;
+ /*!< Static memory manager object */
+ struct mutex *gate_handle;
+ /*!< Pointer to lock */
+ struct sysmemmgr_config cfg;
+ /*!< Current configuration values */
+ struct sysmemmgr_config default_cfg;
+ /*!< Default configuration values */
+};
+
+
+/*!
+ * @brief Object containing state of the system memory manager.
+ */
+static struct sysmemmgr_module_object sysmemmgr_state = {
+ .default_cfg.sizeof_valloc = 0x100000,
+ .default_cfg.sizeof_palloc = 0x100000,
+ .default_cfg.page_size = 0x1000,
+ .default_cfg.event_no = SYSMEMMGR_EVENTNO,
+};
+
+
+/* =============================================================================
+ * APIS
+ * =============================================================================
+ */
+/*
+ * ======== sysmemmgr_get_config ========
+ * Purpose:
+ * Function to get the default values for configuration.
+ */
+void sysmemmgr_get_config(struct sysmemmgr_config *config)
+{
+ if (WARN_ON(config == NULL))
+ goto err_exit;
+
+ if (atomic_cmpmask_and_lt(&(sysmemmgr_state.ref_count),
+ SYSMEMMGR_MAKE_MAGICSTAMP(0),
+ SYSMEMMGR_MAKE_MAGICSTAMP(1)) == true)
+ memcpy((void *) config, (void *)(&sysmemmgr_state.default_cfg),
+ sizeof(struct sysmemmgr_config));
+ else
+ memcpy((void *) config, (void *)(&sysmemmgr_state.cfg),
+ sizeof(struct sysmemmgr_config));
+
+ return;
+
+err_exit:
+ printk(KERN_ERR "sysmemmgr_get_config: Argument of type "
+ "(struct sysmemmgr_config *) passed is NULL\n");
+ return;
+}
+
+
+/*
+ * ======== sysmemmgr_setup ========
+ * Purpose:
+ * Function to set up the system memory manager module.
+ */
+int sysmemmgr_setup(struct sysmemmgr_config *config)
+{
+ int status = 0;
+ struct sysmemmgr_static_mem_mgr_obj *smmObj = NULL;
+
+ /* Set ref_count to a known initial value if it is not yet initialized;
+ * the upper 16 bits carry the module ID so that an uninitialized
+ * ref_count can be detected.
+ */
+ atomic_cmpmask_and_set(&sysmemmgr_state.ref_count,
+ SYSMEMMGR_MAKE_MAGICSTAMP(0), SYSMEMMGR_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&sysmemmgr_state.ref_count) != \
+ SYSMEMMGR_MAKE_MAGICSTAMP(1)) {
+ status = SYSMEMMGR_S_ALREADYSETUP;
+ goto exit;
+ }
+
+ if (WARN_ON(config == NULL)) {
+ /* Config parameters are not provided */
+ status = -EINVAL;
+ goto err_config;
+ }
+ if (WARN_ON((config->static_virt_base_addr == (u32) NULL)
+ && (config->static_mem_size != 0))) {
+ /* Virtual Base address of static memory region is NULL */
+ status = -EINVAL;
+ goto err_virt_addr;
+ }
+ if (WARN_ON((config->static_phys_base_addr == (u32) NULL)
+ && (config->static_mem_size != 0))) {
+ /*Physical Base address of static memory region is NULL */
+ status = -EINVAL;
+ goto err_phys_addr;
+ }
+
+ /* Copy the config parameters to the module state */
+ memcpy((void *)(&sysmemmgr_state.cfg), (void *) config,
+ sizeof(struct sysmemmgr_config));
+
+ /* Create the static memory allocator */
+ if (config->static_mem_size != 0) {
+ smmObj = &sysmemmgr_state.static_mem_obj;
+ smmObj->head.address = config->static_virt_base_addr;
+ smmObj->head.size = 0;
+ smmObj->tail.address = (config->static_virt_base_addr + \
+ config->static_mem_size);
+ smmObj->tail.size = 0;
+ smmObj->head.next = &smmObj->tail;
+ smmObj->tail.next = NULL;
+ }
+
+ /* Create the lock */
+ sysmemmgr_state.gate_handle = kzalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (sysmemmgr_state.gate_handle == NULL) {
+ /* Failed to create gate handle */
+ status = -ENOMEM;
+ goto err_mem_gate;
+ }
+ return 0;
+
+err_mem_gate:
+ printk(KERN_ERR "sysmemmgr_setup: Failed to create gate handle\n");
+ goto exit;
+
+err_phys_addr:
+ printk(KERN_ERR "sysmemmgr_setup: Physical Base address of static "
+ "memory region is NULL\n");
+ goto exit;
+
+err_virt_addr:
+ printk(KERN_ERR "sysmemmgr_setup: Virtual Base address of static "
+ "memory region is NULL\n");
+ goto exit;
+
+err_config:
+ printk(KERN_ERR "sysmemmgr_setup: Argument of type "
+ "(struct sysmemmgr_config *) passed is NULL\n");
+ goto exit;
+
+exit:
+ if (status < 0) {
+ atomic_set(&sysmemmgr_state.ref_count,
+ SYSMEMMGR_MAKE_MAGICSTAMP(0));
+ }
+ return status;
+}
+
+
+/*
+ * ======== sysmemmgr_destroy ========
+ * Purpose:
+ * Function to finalize the system memory manager module.
+ */
+int sysmemmgr_destroy(void)
+{
+ int status = 0;
+
+ if (atomic_cmpmask_and_lt(&(sysmemmgr_state.ref_count),
+ SYSMEMMGR_MAKE_MAGICSTAMP(0), SYSMEMMGR_MAKE_MAGICSTAMP(1)) == \
+ true) {
+ /*! @retval SYSMEMMGR_E_INVALIDSTATE Module was not
+ * initialized */
+ status = SYSMEMMGR_E_INVALIDSTATE;
+ goto err_exit;
+ }
+
+ if (atomic_dec_return(&sysmemmgr_state.ref_count) == \
+ SYSMEMMGR_MAKE_MAGICSTAMP(0)) {
+ /* Delete the lock */
+ kfree(sysmemmgr_state.gate_handle);
+ }
+ return 0;
+
+err_exit:
+ printk(KERN_ERR "sysmemmgr_destroy: Module was not initialized\n");
+ return status;
+}
+
+
+/*
+ * ======== sysmemmgr_alloc ========
+ * Purpose:
+ * Function to allocate a memory block.
+ */
+void *sysmemmgr_alloc(u32 size, enum sysmemmgr_allocflag flag)
+{
+ int status = 0;
+ struct sysmemmgr_static_mem_mgr_obj *smObj = NULL;
+ struct sysmemmgr_static_mem_struct *ptr = NULL;
+ struct sysmemmgr_static_mem_struct *newptr = NULL;
+ void *ret_ptr = NULL;
+
+ if (atomic_cmpmask_and_lt(&(sysmemmgr_state.ref_count),
+ SYSMEMMGR_MAKE_MAGICSTAMP(0), SYSMEMMGR_MAKE_MAGICSTAMP(1)) == \
+ true) {
+ /*! @retval SYSMEMMGR_E_INVALIDSTATE Module was not
+ * initialized */
+ status = SYSMEMMGR_E_INVALIDSTATE;
+ goto err_exit;
+ }
+
+ if ((flag & sysmemmgr_allocflag_physical) && \
+ !(flag & sysmemmgr_allocflag_dma)) {
+ /* TBD: works with DMM
+ ret_ptr = platform_mem_alloc (size, 0,
+ MemoryOS_MemTypeFlags_Physical); */
+ if (ret_ptr == NULL) {
+ if (sysmemmgr_state.cfg.static_mem_size == 0) {
+ /* Memory pool is not configured. */
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ smObj = &sysmemmgr_state.static_mem_obj;
+ ptr = &smObj->head;
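+ /* First-fit scan: stop at the first node whose gap to its
+ * successor can hold 'size' bytes. */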
+ while (ptr && ptr->next) {
+ if (((ptr->next->address - \
+ (ptr->address + ptr->size)) >= size))
+ break;
+ ptr = ptr->next;
+ }
+
+ if (ptr->next == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ newptr = vmalloc(
+ sizeof(struct sysmemmgr_static_mem_struct));
+ if (newptr != NULL) {
+ newptr->address = ptr->address + ptr->size;
+ newptr->size = size;
+ newptr->next = ptr->next;
+ ptr->next = newptr;
+ ret_ptr = (void *) newptr->address;
+ } else {
+ status = -ENOMEM;
+ }
+ }
+ goto exit;
+ }
+
+ if (flag & sysmemmgr_allocflag_physical) {
+ ret_ptr = kmalloc(size, GFP_KERNEL);
+ if (ret_ptr == NULL)
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ if (flag & sysmemmgr_allocflag_dma) {
+ ret_ptr = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ if (ret_ptr == NULL)
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ ret_ptr = vmalloc(size);
+ if (ret_ptr == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+ goto exit;
+
+err_exit:
+ printk(KERN_ERR "sysmemmgr_alloc: Module was not initialized\n");
+exit:
+ if (WARN_ON(ret_ptr == NULL))
+ printk(KERN_ERR "sysmemmgr_alloc: Allocation failed\n");
+ return ret_ptr;
+}
+
+
+/*
+ * ======== sysmemmgr_free ========
+ * Purpose:
+ * Function to de-allocate a previous allocated memory block.
+ */
+int sysmemmgr_free(void *blk, u32 size, enum sysmemmgr_allocflag flag)
+{
+ int status = 0;
+ struct sysmemmgr_static_mem_mgr_obj *smObj = NULL;
+ struct sysmemmgr_static_mem_struct *ptr = NULL;
+ struct sysmemmgr_static_mem_struct *prev = NULL;
+
+ if (atomic_cmpmask_and_lt(&(sysmemmgr_state.ref_count),
+ SYSMEMMGR_MAKE_MAGICSTAMP(0), SYSMEMMGR_MAKE_MAGICSTAMP(1)) == \
+ true) {
+ /*! @retval SYSMEMMGR_E_INVALIDSTATE Module was not
+ * initialized */
+ status = SYSMEMMGR_E_INVALIDSTATE;
+ goto err_exit;
+ }
+
+ if ((flag & sysmemmgr_allocflag_physical) && \
+ !(flag & sysmemmgr_allocflag_dma)) {
+ if (((u32) blk >= sysmemmgr_state.cfg.static_virt_base_addr)
+ && ((u32) blk < \
+ (sysmemmgr_state.cfg.static_virt_base_addr + \
+ sysmemmgr_state.cfg.static_mem_size))) {
+ smObj = &sysmemmgr_state.static_mem_obj;
+ ptr = &smObj->head;
+ while (ptr && ptr->next) {
+ if (ptr->next->address == (u32) blk)
+ break;
+ ptr = ptr->next;
+ }
+ prev = ptr;
+ ptr = ptr->next;
+ prev->next = ptr->next;
+
+ /* Free the node */
+ vfree(ptr);
+ } else {
+ kfree(blk);
+ }
+ } else if (flag & sysmemmgr_allocflag_physical) {
+ kfree(blk);
+ } else if (flag & sysmemmgr_allocflag_dma) {
+ kfree(blk);
+ } else {
+ vfree(blk);
+ }
+ return 0;
+
+err_exit:
+ printk(KERN_ERR "sysmemmgr_free: Module was not initialized\n");
+ return status;
+}
+
+
+/*
+ * ======== sysmemmgr_translate ========
+ * Purpose:
+ * Function to translate an address among different address spaces.
+ */
+void *sysmemmgr_translate(void *src_addr, enum sysmemmgr_xltflag flags)
+{
+ void *ret_ptr = NULL;
+
+ switch (flags) {
+ case sysmemmgr_xltflag_kvirt2phys:
+ {
+ if (((u32) src_addr >= \
+ sysmemmgr_state.cfg.static_virt_base_addr) && \
+ ((u32) src_addr < \
+ (sysmemmgr_state.cfg.static_virt_base_addr + \
+ sysmemmgr_state.cfg.static_mem_size))) {
+ ret_ptr = (void *)(((u32) src_addr - \
+ sysmemmgr_state.cfg.static_virt_base_addr) + \
+ (sysmemmgr_state.cfg.static_phys_base_addr));
+ } else {
+ ret_ptr = platform_mem_translate(src_addr,
+ PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS);
+ }
+ }
+ break;
+
+ case sysmemmgr_xltflag_phys2kvirt:
+ {
+ if (((u32) src_addr >= \
+ sysmemmgr_state.cfg.static_phys_base_addr) && \
+ ((u32) src_addr < \
+ (sysmemmgr_state.cfg.static_phys_base_addr + \
+ sysmemmgr_state.cfg.static_mem_size))) {
+ ret_ptr = (void *)(((u32) src_addr - \
+ sysmemmgr_state.cfg.static_phys_base_addr) + \
+ (sysmemmgr_state.cfg.static_virt_base_addr));
+ } else {
+ ret_ptr = platform_mem_translate(src_addr,
+ PLATFORM_MEM_XLT_FLAGS_PHYS2VIRT);
+ }
+ }
+ break;
+
+ default:
+ {
+ printk(KERN_ALERT "sysmemmgr_translate: Unhandled translation "
+ "flag\n");
+ }
+ break;
+ }
+
+ return ret_ptr;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/sysmemmgr_ioctl.c b/drivers/dsp/syslink/multicore_ipc/sysmemmgr_ioctl.c
new file mode 100644
index 000000000000..591e04849873
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sysmemmgr_ioctl.c
@@ -0,0 +1,227 @@
+/*
+ * sysmemmgr_ioctl.c
+ *
+ * This file implements all the ioctl operations required on the sysmemmgr
+ * module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Linux headers */
+#include <linux/uaccess.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+/* Module Headers */
+#include <sysmemmgr.h>
+#include <sysmemmgr_ioctl.h>
+
+
+/*
+ * ======== sysmemmgr_ioctl_get_config ========
+ * Purpose:
+ * This is the ioctl interface to the sysmemmgr_get_config function
+ */
+static inline int sysmemmgr_ioctl_get_config(struct sysmemmgr_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct sysmemmgr_config config;
+
+ sysmemmgr_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct sysmemmgr_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/*
+ * ======== sysmemmgr_ioctl_setup ========
+ * Purpose:
+ * This is the ioctl interface to the sysmemmgr_setup function
+ */
+static inline int sysmemmgr_ioctl_setup(struct sysmemmgr_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct sysmemmgr_config config;
+
+ if (cargs->args.setup.config == NULL) {
+ cargs->api_status = sysmemmgr_setup(NULL);
+ goto exit;
+ }
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct sysmemmgr_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = sysmemmgr_setup(&config);
+
+exit:
+ return retval;
+}
+
+/*
+ * ======== sysmemmgr_ioctl_destroy ========
+ * Purpose:
+ * This is the ioctl interface to the sysmemmgr_destroy function
+ */
+static inline int sysmemmgr_ioctl_destroy(struct sysmemmgr_cmd_args *cargs)
+{
+ cargs->api_status = sysmemmgr_destroy();
+ return 0;
+}
+
+/*
+ * ======== sysmemmgr_ioctl_alloc ========
+ * Purpose:
+ * This is the ioctl interface to the sysmemmgr_alloc function
+ */
+static inline int sysmemmgr_ioctl_alloc(struct sysmemmgr_cmd_args *cargs)
+{
+ void *kbuf = NULL;
+ void *phys = NULL;
+
+ kbuf = sysmemmgr_alloc(cargs->args.alloc.size,
+ cargs->args.alloc.flags);
+ if (unlikely(kbuf == NULL))
+ goto exit;
+
+ /* If the flag is not virtually contiguous */
+ if (cargs->args.alloc.flags != sysmemmgr_allocflag_virtual)
+ phys = sysmemmgr_translate(kbuf, sysmemmgr_xltflag_kvirt2phys);
+ cargs->api_status = 0;
+
+exit:
+ cargs->args.alloc.kbuf = kbuf;
+ cargs->args.alloc.kbuf = phys;
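+ /* FIXME: the assignment above overwrites kbuf with the translated
+ * physical address, so the kernel virtual address is not returned
+ * to the caller. */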
+ return 0;
+}
+
+/*
+ * ======== sysmemmgr_ioctl_free ========
+ * Purpose:
+ * This is the ioctl interface to the sysmemmgr_free function
+ */
+static inline int sysmemmgr_ioctl_free(struct sysmemmgr_cmd_args *cargs)
+{
+ cargs->api_status = sysmemmgr_free(cargs->args.free.kbuf,
+ cargs->args.free.size,
+ cargs->args.alloc.flags);
+ return 0;
+}
+
+/*
+ * ======== sysmemmgr_ioctl_translate ========
+ * Purpose:
+ * This is the ioctl interface to the sysmemmgr_translate function
+ */
+static inline int sysmemmgr_ioctl_translate(struct sysmemmgr_cmd_args *cargs)
+{
+ cargs->args.translate.ret_ptr = sysmemmgr_translate(
+ cargs->args.translate.buf,
+ cargs->args.translate.flags);
+ WARN_ON(cargs->args.translate.ret_ptr == NULL);
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== sysmemmgr_ioctl ========
+ * Purpose:
+ * ioctl interface function for sysmemmgr module
+ */
+int sysmemmgr_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int os_status = 0;
+ struct sysmemmgr_cmd_args __user *uarg =
+ (struct sysmemmgr_cmd_args __user *)args;
+ struct sysmemmgr_cmd_args cargs;
+ unsigned long size;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ os_status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ os_status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+ if (os_status) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg, sizeof(struct sysmemmgr_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_SYSMEMMGR_GETCONFIG:
+ os_status = sysmemmgr_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_SYSMEMMGR_SETUP:
+ os_status = sysmemmgr_ioctl_setup(&cargs);
+ break;
+
+ case CMD_SYSMEMMGR_DESTROY:
+ os_status = sysmemmgr_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_SYSMEMMGR_ALLOC:
+ os_status = sysmemmgr_ioctl_alloc(&cargs);
+ break;
+
+ case CMD_SYSMEMMGR_FREE:
+ os_status = sysmemmgr_ioctl_free(&cargs);
+ break;
+
+ case CMD_SYSMEMMGR_TRANSLATE:
+ os_status = sysmemmgr_ioctl_translate(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ os_status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ os_status = -ERESTARTSYS;
+
+ if (os_status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct sysmemmgr_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return os_status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/sysmgr.c b/drivers/dsp/syslink/multicore_ipc/sysmgr.c
new file mode 100644
index 000000000000..bbf9b4be4b27
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sysmgr.c
@@ -0,0 +1,846 @@
+/*
+ * sysmgr.c
+ *
+ * Implementation of System manager.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include <syslink/atomic_linux.h>
+
+/* Module headers */
+#include <multiproc.h>
+#include <sysmemmgr.h>
+#include <sysmgr.h>
+#include <_sysmgr.h>
+#include <platform.h>
+#include <platform_mem.h>
+
+#include <gatepeterson.h>
+#include <sharedregion.h>
+#include <listmp.h>
+#include <messageq.h>
+#include <messageq_transportshm.h>
+#include <notify.h>
+/*#include <notify_driver.h>*/
+#include <notify_ducatidriver.h>
+
+#include <nameserver.h>
+#include <nameserver_remote.h>
+#include <nameserver_remotenotify.h>
+#include <procmgr.h>
+#include <heap.h>
+#include <heapbuf.h>
+
+/* =============================================================================
+ * Macros
+ * =============================================================================
+ */
+/*!
+ * @def BOOTLOADPAGESIZE
+ * @brief Error code base for System manager.
+ */
+#define BOOTLOADPAGESIZE (0x1000) /* 4K page size */
+
+/*!
+ * @def SYSMGR_ENTRYVALIDITYSTAMP
+ * @brief Validity stamp for boot load page entries.
+ */
+#define SYSMGR_ENTRYVALIDITYSTAMP (0xBABAC0C0)
+
+/*!
+ * @def SYSMGR_ENTRYVALIDSTAMP
+ * @brief Validity stamp for boot load page entries.
+ */
+#define SYSMGR_ENTRYVALIDSTAMP (0xBABAC0C0)
+
+/*!
+ * @def SYSMGR_SCALABILITYHANDSHAKESTAMP
+ * @brief scalability configuration handshake value.
+ */
+#define SYSMGR_SCALABILITYHANDSHAKESTAMP (0xBEEF0000)
+
+/*!
+ * @def SYSMGR_SETUPHANDSHAKESTAMP
+ * @brief Platform configured handshake value.
+ */
+#define SYSMGR_SETUPHANDSHAKESTAMP (0xBEEF0001)
+
+/*!
+ * @def SYSMGR_DESTROYHANDSHAKESTAMP
+ * @brief Destroy handshake value.
+ */
+#define SYSMGR_DESTROYHANDSHAKESTAMP (0xBEEF0002)
+
+/*!
+ * @def SYSMGR_BOOTLOADPAGESIZE
+ * @brief Boot load page size.
+ */
+#define SYSMGR_BOOTLOADPAGESIZE (0x00001000)
+
+/* Macro to make a correct module magic number with ref_count */
+#define SYSMGR_MAKE_MAGICSTAMP(x) ((SYSMGR_MODULEID << 12) | (x))
+
+
+/* =============================================================================
+ * Structures & Enums
+ * =============================================================================
+ */
+/*! @brief structure for System manager boot load page entry */
+struct sysmgr_bootload_page_entry {
+ VOLATILE u32 offset;
+ /* Offset of next entry (-1 if not present) */
+ VOLATILE u32 valid;
+ /* Validity of the entry */
+ VOLATILE u32 size;
+ /* Size of the entry data */
+ VOLATILE u32 cmd_id;
+ /* Command ID */
+};
+
+/*! @brief structure for the System manager boot load page */
+struct sysmgr_boot_load_page {
+ VOLATILE struct sysmgr_bootload_page_entry host_config;
+ /* First entry, host specific configuration in the boot load page */
+ u8 padding1[(BOOTLOADPAGESIZE/2) - \
+ sizeof(struct sysmgr_bootload_page_entry)];
+ /* Padding1 */
+ VOLATILE u32 handshake;
+ /* Handshake variable, written by the slave to indicate configuration is done. */
+ VOLATILE struct sysmgr_bootload_page_entry slave_config;
+ /* First entry, slave specific configuration in the boot load page */
+ u8 padding2[(BOOTLOADPAGESIZE/2) - \
+ sizeof(struct sysmgr_bootload_page_entry) - \
+ sizeof(u32)];
+ /* Padding2 */
+};
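+
+/*
+ * Entries in each half of the boot load page form a singly linked list:
+ * 'offset' is the byte offset of the next entry relative to the first entry
+ * of that half (host_config or slave_config), or -1 for the last entry, and
+ * 'valid' is set to SYSMGR_ENTRYVALIDSTAMP once the entry data has been
+ * written. sysmgr_put_object_config() appends to the host_config list and
+ * sysmgr_get_object_config() walks the slave_config list.
+ */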
+
+/*! @brief structure for System manager module state */
+struct sysmgr_module_object {
+ atomic_t ref_count;
+ /* Reference count */
+ struct sysmgr_config config;
+ /* Overall system configuration */
+ struct sysmgr_boot_load_page *boot_load_page[MULTIPROC_MAXPROCESSORS];
+ /* Boot load page of the slaves */
+ bool platform_mem_init_flag;
+ /* Platform memory manager initialize flag */
+ bool multiproc_init_flag;
+ /* Multiproc Initialize flag */
+ bool gatepeterson_init_flag;
+ /* Gatepeterson Initialize flag */
+ bool sharedregion_init_flag;
+ /* Sharedregion Initialize flag */
+ bool listmp_init_flag;
+ /* Listmp Initialize flag */
+ bool messageq_init_flag;
+ /* Messageq Initialize flag */
+ bool notify_init_flag;
+ /* Notify Initialize flag */
+ bool proc_mgr_init_flag;
+ /* Processor manager Initialize flag */
+ bool heapbuf_init_flag;
+ /* Heapbuf Initialize flag */
+ bool nameserver_init_flag;
+ /* Nameserver_remotenotify Initialize flag */
+ bool listmp_sharedmemory_init_flag;
+ /* Listmp_sharedmemory Initialize flag */
+ bool messageq_transportshm_init_flag;
+ /* Messageq_transportshm Initialize flag */
+ bool notify_ducatidrv_init_flag;
+ /* notify_ducatidrv Initialize flag */
+ bool nameserver_remotenotify_init_flag;
+ /* nameserver_remotenotify Initialize flag */
+ bool platform_init_flag;
+ /* Flag to indicate platform initialization status */
+};
+
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+/*!
+ * @var sysmgr_state
+ *
+ * @brief Variable holding state of system manager.
+ */
+static struct sysmgr_module_object sysmgr_state;
+
+
+/* =============================================================================
+ * APIS
+ * =============================================================================
+ */
+/*
+ * ======== sysmgr_get_config ========
+ * Purpose:
+ * Function to get the default values for configuration.
+ */
+void sysmgr_get_config(struct sysmgr_config *config)
+{
+ s32 status = 0;
+
+ if (WARN_ON(config == NULL)) {
+ status = -EINVAL;
+ printk(KERN_ALERT "sysmgr_get_config [0x%x] : Argument of type"
+ " (sysmgr_get_config *) passed is null!",
+ status);
+ return;
+ }
+
+ /* Get the multiproc default config */
+ multiproc_get_config(&config->multiproc_cfg);
+
+ /* Get the gatepeterson default config */
+ gatepeterson_get_config(&config->gatepeterson_cfg);
+
+ /* Get the sharedregion default config */
+ sharedregion_get_config(&config->sharedregion_cfg);
+
+ /* Get the messageq default config */
+ messageq_get_config(&config->messageq_cfg);
+
+ /* Get the notify default config */
+ notify_get_config(&config->notify_cfg);
+
+ /* Get the proc_mgr default config */
+ proc_mgr_get_config(&config->proc_mgr_cfg);
+
+ /* Get the heapbuf default config */
+ heapbuf_get_config(&config->heapbuf_cfg);
+
+ /* Get the listmp_sharedmemory default config */
+ listmp_sharedmemory_get_config(&config->listmp_sharedmemory_cfg);
+
+ /* Get the messageq_transportshm default config */
+ messageq_transportshm_get_config(&config->messageq_transportshm_cfg);
+
+ /* Get the notify_ducati driver default config */
+ notify_ducatidrv_getconfig(&config->notify_ducatidrv_cfg);
+
+ /* Get the nameserver_remotenotify default config */
+ nameserver_remotenotify_get_config(
+ &config->nameserver_remotenotify_cfg);
+}
+EXPORT_SYMBOL(sysmgr_get_config);
+
+/*
+ * ======== sysmgr_get_object_config ========
+ * Purpose:
+ * Function to get the SysMgr Object configuration from Slave.
+ */
+u32 sysmgr_get_object_config(u16 proc_id, void *config, u32 cmd_id, u32 size)
+{
+ struct sysmgr_bootload_page_entry *entry = NULL;
+ u32 offset = 0;
+ u32 ret = 0;
+ struct sysmgr_boot_load_page *blp = NULL;
+
+ if ((proc_id < 0) || (proc_id >= MULTIPROC_MAXPROCESSORS)) {
+ ret = 0;
+ goto exit;
+ }
+
+ blp = (struct sysmgr_boot_load_page *)
+ sysmgr_state.boot_load_page[proc_id];
+
+ entry = (struct sysmgr_bootload_page_entry *) &blp->slave_config;
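+ /* Walk the slave's entry list until an entry with a matching cmd_id
+ * and the expected size is found, or the end of the list
+ * (offset == -1) is reached. */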
+ while (entry->valid == SYSMGR_ENTRYVALIDSTAMP) {
+ if (entry->cmd_id == cmd_id) {
+ if (size == entry->size) {
+ memcpy(config, (void *)((u32)entry + \
+ sizeof(struct sysmgr_bootload_page_entry)),
+ size);
+ ret = size;
+ break;
+ }
+ }
+ if (entry->offset != -1) {
+ offset += entry->offset;
+ entry = (struct sysmgr_bootload_page_entry *)
+ ((u32) &blp->slave_config + entry->offset);
+ } else {
+ break;
+ }
+ }
+
+exit:
+ /* return the number of bytes copied from the boot load page */
+ return ret;
+}
+
+
+/*
+ * ======== sysmgr_put_object_config ========
+ * Purpose:
+ * Function to put the SysMgr Object configuration to Slave.
+ */
+u32 sysmgr_put_object_config(u16 proc_id, void *config, u32 cmd_id, u32 size)
+{
+ struct sysmgr_bootload_page_entry *entry = NULL;
+ struct sysmgr_bootload_page_entry *prev = NULL;
+ u32 offset = 0;
+ struct sysmgr_boot_load_page *blp = NULL;
+
+ if ((proc_id < 0) || (proc_id >= MULTIPROC_MAXPROCESSORS)) {
+ size = 0;
+ goto exit;
+ }
+
+ /* Get the boot load page pointer */
+ blp = sysmgr_state.boot_load_page[proc_id];
+
+ /* Put the entry at the end of list */
+ entry = (struct sysmgr_bootload_page_entry *) &blp->host_config;
+ while (entry->valid == SYSMGR_ENTRYVALIDSTAMP) {
+ prev = entry;
+ if (entry->offset != -1) {
+ offset += entry->offset;
+ entry = (struct sysmgr_bootload_page_entry *)
+ ((u32) &blp->host_config + entry->offset);
+ } else {
+ break;
+ }
+ }
+
+ /* First entry has prev set to NULL */
+ if (prev == NULL) {
+ entry->offset = -1;
+ entry->cmd_id = cmd_id;
+ entry->size = size;
+ memcpy((void *)((u32)entry + \
+ sizeof(struct sysmgr_bootload_page_entry)),
+ config, size);
+ entry->valid = SYSMGR_ENTRYVALIDSTAMP;
+ } else {
+ entry = (struct sysmgr_bootload_page_entry *)((u32)entry + \
+ sizeof(struct sysmgr_bootload_page_entry) + \
+ entry->size);
+ entry->offset = -1;
+ entry->cmd_id = cmd_id;
+ entry->size = size;
+ memcpy((void *)((u32)entry + \
+ sizeof(struct sysmgr_bootload_page_entry)),
+ config, size);
+ entry->valid = SYSMGR_ENTRYVALIDSTAMP;
+
+ /* Attach the new created entry */
+ prev->offset = ((u32) entry - (u32) &blp->host_config);
+ }
+
+exit:
+ /* return the number of bytes written to the boot load page */
+ return size;
+}
+
+
+/*
+ * ======== sysmgr_setup ========
+ * Purpose:
+ * Function to setup the System.
+ */
+s32 sysmgr_setup(const struct sysmgr_config *cfg)
+{
+ s32 status = 0;
+ struct sysmgr_config *config = NULL;
+
+ /* Set ref_count to a known initial value if it is not yet initialized;
+ * the upper 16 bits carry the module ID so that an uninitialized
+ * ref_count can be detected.
+ */
+ atomic_cmpmask_and_set(&sysmgr_state.ref_count,
+ SYSMGR_MAKE_MAGICSTAMP(0),
+ SYSMGR_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&sysmgr_state.ref_count)
+ != SYSMGR_MAKE_MAGICSTAMP(1)) {
+ status = 1;
+ goto exit;
+ }
+
+ if (cfg == NULL) {
+ sysmgr_get_config(&sysmgr_state.config);
+ config = &sysmgr_state.config;
+ } else {
+ memcpy((void *) (&sysmgr_state.config), (void *) cfg,
+ sizeof(struct sysmgr_config));
+ config = (struct sysmgr_config *) cfg;
+ }
+
+ /* Initialize PlatformMem */
+ status = platform_mem_setup();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : platform_mem_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "platform_mem_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.platform_mem_init_flag = true;
+ }
+
+ /* Override the platform specific configuration */
+ platform_override_config(config);
+
+ status = multiproc_setup(&(config->multiproc_cfg));
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : multiproc_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "sysmgr_setup : status [0x%x]\n" , status);
+ sysmgr_state.multiproc_init_flag = true;
+ }
+
+ /* Initialize ProcMgr */
+ if (status >= 0) {
+ status = proc_mgr_setup(&(config->proc_mgr_cfg));
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : proc_mgr_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "proc_mgr_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.proc_mgr_init_flag = true;
+ }
+ }
+
+ /* Initialize SharedRegion */
+ if (status >= 0) {
+ status = sharedregion_setup(&config->sharedregion_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : sharedregion_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "sharedregion_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.sharedregion_init_flag = true;
+ }
+ }
+
+ /* Initialize Notify */
+ if (status >= 0) {
+ status = notify_setup(&config->notify_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : notify_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "notify_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.notify_init_flag = true;
+ }
+ }
+
+ /* Initialize NameServer */
+ if (status >= 0) {
+ status = nameserver_setup();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : nameserver_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "nameserver_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.nameserver_init_flag = true;
+ }
+ }
+
+ /* Initialize GatePeterson */
+ if (status >= 0) {
+ status = gatepeterson_setup(&config->gatepeterson_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : gatepeterson_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "gatepeterson_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.gatepeterson_init_flag = true;
+ }
+ }
+
+ /* Initialize MessageQ */
+ if (status >= 0) {
+ status = messageq_setup(&config->messageq_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : messageq_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "messageq_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.messageq_init_flag = true;
+ }
+ }
+
+ /* Initialize HeapBuf */
+ if (status >= 0) {
+ status = heapbuf_setup(&config->heapbuf_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : heapbuf_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "heapbuf_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.heapbuf_init_flag = true;
+ }
+ }
+
+ /* Initialize ListMPSharedMemory */
+ if (status >= 0) {
+ status = listmp_sharedmemory_setup(
+ &config->listmp_sharedmemory_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : "
+ "listmp_sharedmemory_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "listmp_sharedmemory_setup : "
+ "status [0x%x]\n" , status);
+ sysmgr_state.listmp_sharedmemory_init_flag = true;
+ }
+ }
+
+ /* Initialize MessageQTransportShm */
+ if (status >= 0) {
+ status = messageq_transportshm_setup(
+ &config->messageq_transportshm_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : "
+ "messageq_transportshm_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "messageq_transportshm_setup : "
+ "status [0x%x]\n", status);
+ sysmgr_state.messageq_transportshm_init_flag = true;
+ }
+ }
+
+ /* Initialize Notify DucatiDriver */
+ if (status >= 0) {
+ status = notify_ducatidrv_setup(&config->notify_ducatidrv_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : "
+ "notify_ducatidrv_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "notify_ducatidrv_setup : "
+ "status [0x%x]\n" , status);
+ sysmgr_state.notify_ducatidrv_init_flag = true;
+ }
+ }
+
+ /* Initialize NameServerRemoteNotify */
+ if (status >= 0) {
+ status = nameserver_remotenotify_setup(
+ &config->nameserver_remotenotify_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : "
+ "nameserver_remotenotify_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "nameserver_remotenotify_setup : "
+ "status [0x%x]\n" , status);
+ sysmgr_state.nameserver_remotenotify_init_flag = true;
+ }
+ }
+
+ if (status >= 0) {
+ /* Call platform setup function */
+ status = platform_setup(config);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : platform_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "platform_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.platform_init_flag = true;
+ }
+ }
+
+exit:
+ if (status < 0)
+ atomic_set(&sysmgr_state.ref_count, SYSMGR_MAKE_MAGICSTAMP(0));
+
+ return status;
+}
+EXPORT_SYMBOL(sysmgr_setup);
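+
+/*
+ * Usage sketch (illustrative only; passing NULL makes sysmgr_setup fall
+ * back to the sysmgr_get_config defaults, as handled above):
+ *
+ *    s32 status;
+ *
+ *    status = sysmgr_setup(NULL);
+ *    if (status < 0)
+ *        return status;
+ *    ...
+ *    sysmgr_destroy();
+ */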
+
+/*
+ * ======== sysmgr_destroy ========
+ * Purpose:
+ * Function to finalize the System.
+ */
+s32 sysmgr_destroy(void)
+{
+ s32 status = 0;
+
+ if (atomic_cmpmask_and_lt(&(sysmgr_state.ref_count),
+ SYSMGR_MAKE_MAGICSTAMP(0),
+ SYSMGR_MAKE_MAGICSTAMP(1)) != false) {
+ /*! @retval SYSMGR_E_INVALIDSTATE Module was not initialized */
+ status = SYSMGR_E_INVALIDSTATE;
+ goto exit;
+ }
+
+ if (atomic_dec_return(&sysmgr_state.ref_count)
+ != SYSMGR_MAKE_MAGICSTAMP(0)) {
+ status = 1;
+ goto exit;
+ }
+
+ /* Finalize Platform module*/
+ if (sysmgr_state.platform_init_flag == true) {
+ status = platform_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : platform_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.platform_init_flag = false;
+ }
+ }
+
+ /* Finalize NameServerRemoteNotify module */
+ if (sysmgr_state.nameserver_remotenotify_init_flag == true) {
+ status = nameserver_remotenotify_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "nameserver_remotenotify_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.nameserver_remotenotify_init_flag =
+ false;
+ }
+ }
+
+ /* Finalize Notify Ducati Driver module */
+ if (sysmgr_state.notify_ducatidrv_init_flag == true) {
+ status = notify_ducatidrv_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "notify_ducatidrv_destroy failed [0x%x]\n",
+ status);
+ } else {
+ sysmgr_state.notify_ducatidrv_init_flag = false;
+ }
+ }
+
+ /* Finalize MessageQTransportShm module */
+ if (sysmgr_state.messageq_transportshm_init_flag == true) {
+ status = messageq_transportshm_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "messageq_transportshm_destroy failed [0x%x]\n",
+ status);
+ } else {
+ sysmgr_state.messageq_transportshm_init_flag =
+ false;
+ }
+ }
+
+ /* Finalize ListMPSharedMemory module */
+ if (sysmgr_state.listmp_sharedmemory_init_flag == true) {
+ status = listmp_sharedmemory_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "listmp_sharedmemory_destroy failed [0x%x]\n",
+ status);
+ } else {
+ sysmgr_state.listmp_sharedmemory_init_flag =
+ false;
+ }
+ }
+
+ /* Finalize HeapBuf module */
+ if (sysmgr_state.heapbuf_init_flag == true) {
+ status = heapbuf_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : heapbuf_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.heapbuf_init_flag = false;
+ }
+ }
+
+ /* Finalize MessageQ module */
+ if (sysmgr_state.messageq_init_flag == true) {
+ status = messageq_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : messageq_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.messageq_init_flag = false;
+ }
+ }
+
+ /* Finalize GatePeterson module */
+ if (sysmgr_state.gatepeterson_init_flag == true) {
+ status = gatepeterson_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "gatepeterson_destroy failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.gatepeterson_init_flag = false;
+ }
+ }
+
+ /* Finalize NameServer module */
+ if (sysmgr_state.nameserver_init_flag == true) {
+ status = nameserver_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : nameserver_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.nameserver_init_flag = false;
+ }
+ }
+
+ /* Finalize Notify module */
+ if (sysmgr_state.notify_init_flag == true) {
+ status = notify_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : sysmgr_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.notify_init_flag = false;
+ }
+ }
+
+ /* Finalize SharedRegion module */
+ if (sysmgr_state.sharedregion_init_flag == true) {
+ status = sharedregion_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "sharedregion_destroy failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.sharedregion_init_flag = false;
+ }
+ }
+
+ /* Finalize ProcMgr module */
+ if (sysmgr_state.proc_mgr_init_flag == true) {
+ status = proc_mgr_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : proc_mgr_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.proc_mgr_init_flag = false;
+ }
+ }
+
+ /* Finalize MultiProc module */
+ if (sysmgr_state.multiproc_init_flag == true) {
+ status = multiproc_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : multiproc_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.multiproc_init_flag = false;
+ }
+ }
+
+ /* Finalize PlatformMem module */
+ if (sysmgr_state.platform_mem_init_flag == true) {
+ status = platform_mem_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : platform_mem_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.platform_mem_init_flag = false;
+ }
+ }
+
+ atomic_set(&sysmgr_state.ref_count, SYSMGR_MAKE_MAGICSTAMP(0));
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : failed with "
+ "status = [0x%x]\n", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(sysmgr_destroy);
+
+/*
+ * ======== sysmgr_set_boot_load_page ========
+ * Purpose:
+ * Function to set the boot load page address for a slave.
+ */
+void sysmgr_set_boot_load_page(u16 proc_id, u32 boot_load_page)
+{
+ struct sysmgr_boot_load_page *temp =
+ (struct sysmgr_boot_load_page *) boot_load_page;
+
+ if (proc_id >= MULTIPROC_MAXPROCESSORS) {
+ printk(KERN_ERR
+ "sysmgr_set_boot_load_page failed: Invalid proc_id passed\n");
+ return;
+ }
+
+ /* Initialize the host config area */
+ sysmgr_state.boot_load_page[proc_id] = temp;
+ temp->host_config.offset = -1;
+ temp->host_config.valid = 0;
+ temp->handshake = 0;
+}
+
+
+/*
+ * ======== sysmgr_wait_for_scalability_info ========
+ * Purpose:
+ * Function to wait for scalability handshake value.
+ */
+void sysmgr_wait_for_scalability_info(u16 proc_id)
+{
+ VOLATILE struct sysmgr_boot_load_page *temp = NULL;
+
+ if (proc_id >= MULTIPROC_MAXPROCESSORS) {
+ printk(KERN_ERR "sysmgr_wait_for_scalability_info failed: "
+ "Invalid proc_id passed\n");
+ return;
+ }
+ temp = sysmgr_state.boot_load_page[proc_id];
+
+ printk(KERN_ERR "sysmgr_wait_for_scalability_info: BF while temp->handshake:%x\n",
+ temp->handshake);
+ while (temp->handshake != SYSMGR_SCALABILITYHANDSHAKESTAMP)
+ ;
+ printk(KERN_ERR "sysmgr_wait_for_scalability_info:AF while temp->handshake:%x\n",
+ temp->handshake);
+
+ /* Reset the handshake value for reverse synchronization */
+ temp->handshake = 0;
+}
+
+
+/*
+ * ======== sysmgr_wait_for_slave_setup ========
+ * Purpose:
+ * Function to wait for slave to complete setup.
+ */
+void sysmgr_wait_for_slave_setup(u16 proc_id)
+{
+ VOLATILE struct sysmgr_boot_load_page *temp = NULL;
+
+ if (proc_id >= MULTIPROC_MAXPROCESSORS) {
+ printk(KERN_ERR "sysmgr_wait_for_slave_setup failed: "
+ "Invalid proc_id passed\n");
+ return;
+ }
+ temp = sysmgr_state.boot_load_page[proc_id];
+
+ while (temp->handshake != SYSMGR_SETUPHANDSHAKESTAMP)
+ ;
+
+ /* Reset the handshake value for reverse synchronization */
+ temp->handshake = 0;
+}
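+
+/*
+ * The three helpers above are normally used by host-side code roughly as
+ * follows (a sketch; the boot load page address, the slave image load
+ * step and the order of the two waits depend on the slave firmware):
+ *
+ *    sysmgr_set_boot_load_page(proc_id, boot_load_page_addr);
+ *    ... load and release the slave processor ...
+ *    sysmgr_wait_for_slave_setup(proc_id);
+ *    sysmgr_wait_for_scalability_info(proc_id);
+ *
+ * Each wait spins until the slave writes the expected handshake stamp
+ * into the boot load page, then clears the stamp for reverse
+ * synchronization.
+ */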
diff --git a/drivers/dsp/syslink/multicore_ipc/sysmgr_ioctl.c b/drivers/dsp/syslink/multicore_ipc/sysmgr_ioctl.c
new file mode 100644
index 000000000000..1f7d01803967
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sysmgr_ioctl.c
@@ -0,0 +1,147 @@
+/*
+ * sysmgr_ioctl.c
+ *
+ * This file implements all the ioctl operations required on the sysmgr
+ * module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Linux headers */
+#include <linux/uaccess.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+/* Module Headers */
+#include <sysmgr.h>
+#include <sysmgr_ioctl.h>
+#include <platform.h>
+
+/*
+ * ======== sysmgr_ioctl_setup ========
+ * Purpose:
+ * This is the ioctl interface to the sysmgr_setup function.
+ */
+static inline int sysmgr_ioctl_setup(struct sysmgr_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct sysmgr_config config;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct sysmgr_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = sysmgr_setup(&config);
+
+exit:
+ return retval;
+}
+
+/*
+ * ======== sysmgr_ioctl_destroy ========
+ * Purpose:
+ * This is the ioctl interface to the sysmgr_destroy function.
+ */
+static inline int sysmgr_ioctl_destroy(struct sysmgr_cmd_args *cargs)
+{
+ cargs->api_status = sysmgr_destroy();
+ return 0;
+}
+
+/*
+ * ======== sysmgr_ioctl ========
+ * Purpose:
+ * ioctl interface function for sysmgr module
+ */
+int sysmgr_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int os_status = 0;
+ struct sysmgr_cmd_args __user *uarg =
+ (struct sysmgr_cmd_args __user *)args;
+ struct sysmgr_cmd_args cargs;
+ unsigned long size;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ os_status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ os_status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+ if (os_status) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg, sizeof(struct sysmgr_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_SYSMGR_SETUP:
+ os_status = sysmgr_ioctl_setup(&cargs);
+ break;
+
+ case CMD_SYSMGR_DESTROY:
+ os_status = sysmgr_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_SYSMGR_LOADCALLBACK:
+#if defined CONFIG_SYSLINK_USE_SYSMGR
+ platform_load_callback((void *)(cargs.args.proc_id));
+ cargs.api_status = 0;
+#endif
+ break;
+
+ case CMD_SYSMGR_STARTCALLBACK:
+#if defined CONFIG_SYSLINK_USE_SYSMGR
+ platform_start_callback((void *)(cargs.args.proc_id));
+ cargs.api_status = 0;
+#endif
+ break;
+
+ case CMD_SYSMGR_STOPCALLBACK:
+#if defined CONFIG_SYSLINK_USE_SYSMGR
+ platform_stop_callback((void *)(cargs.args.proc_id));
+ cargs.api_status = 0;
+#endif
+ break;
+ default:
+ WARN_ON(cmd);
+ os_status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ os_status = -ERESTARTSYS;
+
+ if (os_status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct sysmgr_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return os_status;
+}
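+
+/*
+ * User-space usage sketch (illustrative; the device node path
+ * "/dev/syslinkipc" is an assumption and is not defined in this file,
+ * and cfg is expected to be filled in beforehand):
+ *
+ *    struct sysmgr_cmd_args cargs;
+ *    struct sysmgr_config cfg;
+ *    int fd = open("/dev/syslinkipc", O_RDWR);
+ *
+ *    cargs.args.setup.config = &cfg;
+ *    ioctl(fd, CMD_SYSMGR_SETUP, &cargs);
+ *    ... check cargs.api_status for the sysmgr_setup result ...
+ *    ioctl(fd, CMD_SYSMGR_DESTROY, &cargs);
+ *    close(fd);
+ */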
diff --git a/drivers/dsp/syslink/notify_ducatidriver/Kbuild b/drivers/dsp/syslink/notify_ducatidriver/Kbuild
new file mode 100644
index 000000000000..fd82baca9f04
--- /dev/null
+++ b/drivers/dsp/syslink/notify_ducatidriver/Kbuild
@@ -0,0 +1,19 @@
+
+
+omap_ducatidriver = notify_ducati.o drv_ducati.o
+
+
+obj-$(CONFIG_NOTIFY_DUCATI) += notify_ducatidriver.o
+notify_ducatidriver-objs = $(omap_ducatidriver)
+
+
+ccflags-y += -Wno-strict-prototypes
+
+#Machine dependent
+ccflags-y += -DCONFIG_DISABLE_BRIDGE_PM -DDSP_TRACEBUF_DISABLED \
+ -DVPOM4430_1_06
+
+#Header files
+ccflags-y += -Idrivers/dsp/syslink/inc
+ccflags-y += -Iarch/arm/plat-omap/include/
+
diff --git a/drivers/dsp/syslink/notify_ducatidriver/drv_ducati.c b/drivers/dsp/syslink/notify_ducatidriver/drv_ducati.c
new file mode 100644
index 000000000000..eaf53f4b5a24
--- /dev/null
+++ b/drivers/dsp/syslink/notify_ducatidriver/drv_ducati.c
@@ -0,0 +1,348 @@
+/*
+ * drv_ducati.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+/* ----------------------------------- OS Specific Headers */
+#include <generated/autoconf.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/cdev.h>
+
+#include <asm/pgtable.h>
+#include <syslink/notifydefs.h>
+#include <syslink/notify_ducatidriver.h>
+#include <syslink/notify_ducatidriver_defs.h>
+#include <syslink/GlobalTypes.h>
+
+
+/** ============================================================================
+ * Macros and types
+ * ============================================================================
+ */
+#define NOTIFYDUCATI_NAME "notifyducatidrv"
+
+static char *driver_name = NOTIFYDUCATI_NAME;
+
+static s32 driver_major;
+
+static s32 driver_minor;
+
+struct notifyducati_dev {
+ struct cdev cdev;
+};
+
+static struct notifyducati_dev *notifyducati_device;
+
+static struct class *notifyducati_class;
+
+
+
+
+
+/* driver function to open the notify mailbox driver object. */
+static int drvducati_open(struct inode *inode, struct file *filp);
+static int drvducati_release(struct inode *inode, struct file *filp);
+static int drvducati_ioctl(struct inode *inode,
+ struct file *filp,
+ unsigned int cmd,
+ unsigned long args);
+
+
+/* notify mailbox driver initialization function. */
+static int __init drvducati_initialize_module(void);
+
+/* notify mailbox driver cleanup function. */
+static void __exit drvducati_finalize_module(void);
+
+
+/* Function to invoke the APIs through ioctl. */
+static const struct file_operations driver_ops = {
+ .open = drvducati_open,
+ .release = drvducati_release,
+ .ioctl = drvducati_ioctl,
+
+};
+
+/* Initialization function */
+static int __init drvducati_initialize_module(void)
+{
+ int result = 0;
+ dev_t dev;
+
+ if (driver_major) {
+ dev = MKDEV(driver_major, driver_minor);
+ result = register_chrdev_region(dev, 1, driver_name);
+ } else {
+ result = alloc_chrdev_region(&dev, driver_minor, 1,
+ driver_name);
+ driver_major = MAJOR(dev);
+ }
+
+ notifyducati_device = kmalloc(sizeof(struct notifyducati_dev),
+ GFP_KERNEL);
+ if (!notifyducati_device) {
+ result = -ENOMEM;
+ unregister_chrdev_region(dev, 1);
+ goto func_end;
+ }
+ memset(notifyducati_device, 0, sizeof(struct notifyducati_dev));
+ cdev_init(&notifyducati_device->cdev, &driver_ops);
+ notifyducati_device->cdev.owner = THIS_MODULE;
+ notifyducati_device->cdev.ops = &driver_ops;
+
+ result = cdev_add(&notifyducati_device->cdev, dev, 1);
+
+ if (result) {
+ printk(KERN_ERR "Failed to add the syslink notify ducati device \n");
+ goto func_end;
+ }
+
+ /* udev support */
+ notifyducati_class = class_create(THIS_MODULE, "syslink-notifyducati");
+
+ if (IS_ERR(notifyducati_class)) {
+ result = PTR_ERR(notifyducati_class);
+ printk(KERN_ERR "Error creating notifyducati class\n");
+ goto func_end;
+ }
+ device_create(notifyducati_class, NULL,
+ MKDEV(driver_major, driver_minor), NULL,
+ NOTIFYDUCATI_NAME);
+
+func_end:
+ return result;
+}
+
+/* Finalization function */
+static void __exit drvducati_finalize_module(void)
+{
+ dev_t dev_no;
+
+ dev_no = MKDEV(driver_major, driver_minor);
+ if (notifyducati_device) {
+ cdev_del(&notifyducati_device->cdev);
+ kfree(notifyducati_device);
+ }
+ unregister_chrdev_region(dev_no, 1);
+ if (notifyducati_class) {
+ /* remove the device from sysfs */
+ device_destroy(notifyducati_class, MKDEV(driver_major,
+ driver_minor));
+ class_destroy(notifyducati_class);
+ }
+
+}
+/* driver open */
+static int drvducati_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/* driver close */
+static int drvducati_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+
+
+/*
+ * brief Linux driver function to invoke the APIs through ioctl.
+ *
+ */
+static int drvducati_ioctl(struct inode *inode,
+ struct file *filp,
+ unsigned int cmd,
+ unsigned long args)
+{
+ int status = 0;
+ int retval = 0;
+ struct notify_ducatidrv_cmdargs *cmd_args =
+ (struct notify_ducatidrv_cmdargs *) args;
+ struct notify_ducatidrv_cmdargs common_args;
+
+ switch (cmd) {
+ case CMD_NOTIFYDRIVERSHM_GETCONFIG: {
+ struct notify_ducatidrv_cmdargs_getconfig *src_args =
+ (struct notify_ducatidrv_cmdargs_getconfig *) args;
+ struct notify_ducatidrv_config cfg;
+ notify_ducatidrv_getconfig(&cfg);
+
+ retval = copy_to_user((void *)(src_args->cfg),
+ (const void *) &cfg,
+ sizeof(struct notify_ducatidrv_config));
+
+ if (WARN_ON(retval != 0))
+ goto func_end;
+
+ }
+ break;
+
+ case CMD_NOTIFYDRIVERSHM_SETUP: {
+ struct notify_ducatidrv_cmdargs_setup *src_args =
+ (struct notify_ducatidrv_cmdargs_setup *) args;
+ struct notify_ducatidrv_config cfg;
+ retval = copy_from_user((void *) &cfg,
+ (const void *)(src_args->cfg),
+ sizeof(struct notify_ducatidrv_config));
+ if (WARN_ON(retval != 0))
+ goto func_end;
+
+ status = notify_ducatidrv_setup(&cfg);
+ if (status < 0)
+ printk(KERN_ERR "FAIL: notify_ducatidrv_setup\n");
+ }
+ break;
+
+ case CMD_NOTIFYDRIVERSHM_DESTROY: {
+ status = notify_ducatidrv_destroy();
+
+ if (status < 0)
+ printk(KERN_ERR "FAIL: notify_ducatidrv_destroy\n");
+ }
+ break;
+
+ case CMD_NOTIFYDRIVERSHM_PARAMS_INIT: {
+ struct notify_ducatidrv_cmdargs_paramsinit src_args;
+ struct notify_ducatidrv_params params;
+ retval = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(
+ struct notify_ducatidrv_cmdargs_paramsinit));
+ if (WARN_ON(retval != 0))
+ goto func_end;
+ notify_ducatidrv_params_init(src_args.handle, &params);
+
+ retval = copy_to_user((void *)(src_args.params),
+ (const void *) &params,
+ sizeof(struct notify_ducatidrv_params));
+ if (WARN_ON(retval != 0))
+ goto func_end;
+ }
+ break;
+
+ case CMD_NOTIFYDRIVERSHM_CREATE: {
+ struct notify_ducatidrv_cmdargs_create src_args;
+ retval = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_ducatidrv_cmdargs_create));
+ if (WARN_ON(retval != 0))
+ goto func_end;
+ src_args.handle = notify_ducatidrv_create(
+ src_args.driverName,
+ &(src_args.params));
+ if (src_args.handle == NULL) {
+ status = -EFAULT;
+ printk(KERN_ERR "drvducati_ioctl:status 0x%x,"
+ "NotifyDriverShm_create failed",
+ status);
+ }
+ retval = copy_to_user((void *)(args),
+ (const void *) &src_args,
+ sizeof(struct notify_ducatidrv_cmdargs_create));
+ if (WARN_ON(retval != 0))
+ goto func_end;
+ }
+ break;
+
+ case CMD_NOTIFYDRIVERSHM_DELETE: {
+ struct notify_ducatidrv_cmdargs_delete src_args;
+ retval = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_ducatidrv_cmdargs_delete));
+ if (WARN_ON(retval != 0))
+ goto func_end;
+
+ status = notify_ducatidrv_delete(&(src_args.handle));
+ if (status < 0)
+ printk(KERN_ERR "drvducati_ioctl:"
+ " notify_ducatidrv_delete failed"
+ " status = %d\n", status);
+ }
+ break;
+
+ case CMD_NOTIFYDRIVERSHM_OPEN: {
+ struct notify_ducatidrv_cmdargs_open src_args;
+
+ retval = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_ducatidrv_cmdargs_open));
+ if (WARN_ON(retval != 0))
+ goto func_end;
+
+ status = notify_ducatidrv_open(src_args.driverName,
+ &(src_args.handle));
+ if (status < 0)
+ printk(KERN_ERR "drvducati_ioctl:"
+ " notify_ducatidrv_open failed"
+ " status = %d\n", status);
+ retval = copy_to_user((void *)(args),
+ (const void *) &src_args,
+ sizeof(struct notify_ducatidrv_cmdargs_open));
+ if (WARN_ON(retval != 0))
+ goto func_end;
+ }
+ break;
+
+ case CMD_NOTIFYDRIVERSHM_CLOSE: {
+ struct notify_ducatidrv_cmdargs_close src_args;
+
+ retval = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_ducatidrv_cmdargs_close));
+ if (WARN_ON(retval != 0))
+ goto func_end;
+
+ status = notify_ducatidrv_close(&(src_args.handle));
+ if (status < 0)
+ printk(KERN_ERR "drvducati_ioctl:"
+ " notify_ducatidrv_close"
+ " failed status = %d\n", status);
+ }
+ break;
+
+ default: {
+ status = -EINVAL;
+ printk(KERN_ERR "drivducati_ioctl:Unsupported"
+ " ioctl command specified");
+ }
+ break;
+ }
+
+func_end:
+ /* Set the status and copy the common args to user-side. */
+ common_args.api_status = status;
+ status = copy_to_user((void *) cmd_args,
+ (const void *) &common_args,
+ sizeof(struct notify_ducatidrv_cmdargs));
+
+ if (status != 0)
+ retval = -EFAULT;
+
+ return retval;
+}
+
+
+
+
+MODULE_LICENSE("GPL");
+module_init(drvducati_initialize_module);
+module_exit(drvducati_finalize_module);
diff --git a/drivers/dsp/syslink/notify_ducatidriver/notify_ducati.c b/drivers/dsp/syslink/notify_ducatidriver/notify_ducati.c
new file mode 100755
index 000000000000..f18f30694f15
--- /dev/null
+++ b/drivers/dsp/syslink/notify_ducatidriver/notify_ducati.c
@@ -0,0 +1,1262 @@
+/*
+ * notify_ducati.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <plat/mailbox.h>
+
+#include <syslink/notify_driver.h>
+#include <syslink/notifydefs.h>
+#include <syslink/notify_driverdefs.h>
+#include <syslink/notify_ducatidriver.h>
+#include <syslink/multiproc.h>
+#include <syslink/atomic_linux.h>
+
+
+
+#define NOTIFYSHMDRV_MEM_ALIGN 0
+
+#define NOTIFYSHMDRV_MAX_EVENTS 32
+
+#define NOTIFYSHMDRV_INIT_STAMP 0xA9C8B7D6
+
+#define NOTIFYNONSHMDRV_MAX_EVENTS 1
+
+#define NOTIFYNONSHMDRV_RESERVED_EVENTS 1
+
+#define NOTIFYDRV_DUCATI_RECV_MBX 2
+
+#define NOTIFYDRV_DUCATI_SEND_MBX 3
+
+/*FIX ME: Make use of Multi Proc module */
+#define SELF_ID 0
+
+#define OTHER_ID 1
+
+#define UP 1
+
+#define DOWN 0
+
+#define PROC_TESLA 0
+#define PROC_DUCATI 1
+#define PROC_GPP 2
+#define PROCSYSM3 2
+#define PROCAPPM3 3
+#define MAX_SUBPROC_EVENTS 15
+
+/*FIXME MOVE THIS TO A SUITABLE HEADER */
+#define NOTIFYDRIVERSHM_MODULEID (u32) 0xb9d4
+
+/* Macro to make a correct module magic number with refCount */
+#define NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(x) \
+ ((NOTIFYDRIVERSHM_MODULEID << 12u) | (x))
+
+static struct omap_mbox *ducati_mbox;
+static void notify_ducatidrv_isr(void *ntfy_msg);
+static void notify_ducatidrv_isr_callback(void *ref_data, void* ntfy_msg);
+
+struct notify_driver_object_list {
+ struct list_head elem;
+ struct notify_driver_object *drv_handle;
+};
+
+
+/*
+ * brief Notify ducati driver instance object.
+ */
+struct notify_ducatidrv_object {
+ struct notify_ducatidrv_params params;
+ short int proc_id;
+ struct notify_drv_eventlist *event_list;
+ VOLATILE struct notify_shmdrv_ctrl *ctrl_ptr;
+ struct notify_shmdrv_eventreg *reg_chart;
+ struct notify_driver_object *drv_handle;
+ short int self_id;
+ short int other_id;
+};
+
+
+/*
+ * brief Defines the notify_ducatidrv state object, which contains all
+ * the module specific information.
+ */
+struct notify_ducatidrv_module {
+ atomic_t ref_count;
+ struct notify_ducatidrv_config cfg;
+ struct notify_ducatidrv_config def_cfg;
+ struct notify_ducatidrv_params def_inst_params;
+ struct mutex *gate_handle;
+ struct list_head drv_handle_list;
+} ;
+
+
+static struct notify_ducatidrv_module notify_ducatidriver_state = {
+ .gate_handle = NULL,
+ .def_inst_params.shared_addr = 0x0,
+ .def_inst_params.shared_addr_size = 0x0,
+ .def_inst_params.num_events = NOTIFYSHMDRV_MAX_EVENTS,
+ .def_inst_params.num_reserved_events = 3,
+ .def_inst_params.send_event_poll_count = (int) -1,
+ .def_inst_params.remote_proc_id = -1,
+ .def_inst_params.recv_int_id = (int) -1,
+ .def_inst_params.send_int_id = (int) -1
+};
+
+/*
+ * This function searches for an element in the list.
+ */
+static void notify_ducatidrv_qsearch_elem(struct list_head *list,
+ struct notify_drv_eventlistner *check_obj,
+ struct notify_drv_eventlistner **listener);
+
+
+/*
+ * brief Get the default configuration for the notify_ducatidrv module.
+ *
+ * This function can be called by the application to have the
+ * configuration structure it passes to notify_ducatidrv_setup filled
+ * in with the default parameters by the notify_ducatidrv module. If
+ * the user does not wish to change any of the default parameters,
+ * this API need not be called.
+ *
+ */
+void notify_ducatidrv_getconfig(struct notify_ducatidrv_config *cfg)
+{
+ BUG_ON(cfg == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(0),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(1))
+ == true)
+ memcpy(cfg,
+ &(notify_ducatidriver_state.def_cfg),
+ sizeof(struct notify_ducatidrv_config));
+ else
+ memcpy(cfg,
+ &(notify_ducatidriver_state.cfg),
+ sizeof(struct notify_ducatidrv_config));
+}
+EXPORT_SYMBOL(notify_ducatidrv_getconfig);
+
+/*
+ * brief Function to open a handle to an existing notify_ducatidrv_object
+ * handling the procId.
+ *
+ * This function returns a handle to an existing notify_ducatidrv
+ * instance created for this procId. It enables other entities to
+ * access and use this notify_ducatidrv instance.
+ */
+int notify_ducatidrv_open(char *driver_name,
+ struct notify_driver_object **handle_ptr)
+{
+ int status = 0;
+ BUG_ON(driver_name == NULL);
+ BUG_ON(handle_ptr == NULL);
+ /* Enter critical section protection. */
+ WARN_ON(mutex_lock_interruptible(notify_ducatidriver_state.
+ gate_handle) != 0);
+ /* Get the handle from Notify module. */
+ status = notify_get_driver_handle(driver_name, handle_ptr);
+ WARN_ON(status < 0);
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+ return status;
+}
+
+
+
+
+/*
+ * brief Function to close this handle to the notify_ducatidrv instance.
+ *
+ * This function closes the handle to the notify_ducatidrv instance
+ * obtained through notify_ducatidrv_open call made earlier.
+ */
+int notify_ducatidrv_close(struct notify_driver_object **handle_ptr)
+{
+ int status = 0;
+ BUG_ON(handle_ptr == NULL);
+ BUG_ON(*handle_ptr == NULL);
+ *handle_ptr = NULL;
+ return status;
+}
+
+
+
+
+/*
+ * brief Function to initialize the parameters for this notify_ducatidrv
+ * instance.
+ */
+void notify_ducatidrv_params_init(struct notify_driver_object *handle,
+ struct notify_ducatidrv_params *params)
+{
+ struct notify_ducatidrv_object *driver_obj;
+ BUG_ON(params == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(0),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(1))
+ == true) {
+ /* FIXME: a not-initialized error code should be returned */
+ BUG_ON(1);
+ }
+
+ if (handle == NULL) {
+ memcpy(params,
+ &(notify_ducatidriver_state.def_inst_params),
+ sizeof(struct notify_ducatidrv_params));
+ } else {
+ /*Return updated notify_ducatidrv instance specific parameters*/
+ driver_obj = (struct notify_ducatidrv_object *)
+ handle->driver_object;
+ memcpy(params, &(driver_obj->params),
+ sizeof(struct notify_ducatidrv_params));
+ }
+}
+EXPORT_SYMBOL(notify_ducatidrv_params_init);
+
+
+
+/*
+ * brief Function to create an instance of this Notify ducati driver.
+ *
+ */
+struct notify_driver_object *notify_ducatidrv_create(char *driver_name,
+ const struct notify_ducatidrv_params *params)
+{
+
+ int status = 0;
+ struct notify_ducatidrv_object *driver_obj = NULL;
+ struct notify_driver_object *drv_handle = NULL;
+ struct notify_drv_eventlist *event_list = NULL;
+ VOLATILE struct notify_shmdrv_proc_ctrl *ctrl_ptr = NULL;
+ struct notify_driver_attrs drv_attrs;
+ struct notify_interface fxn_table;
+ struct notify_driver_object_list *drv_handle_inst = NULL;
+ int proc_id;
+ int i;
+ u32 shm_va;
+ int tmp_status = NOTIFY_SUCCESS;
+
+ BUG_ON(driver_name == NULL);
+ BUG_ON(params == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(0),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "Module not initialized\n");
+ goto func_end;
+ }
+
+ if (params->num_events > NOTIFYSHMDRV_MAX_EVENTS) {
+ printk(KERN_ERR "More than max number of events passed\n");
+ goto func_end;
+ }
+ WARN_ON(mutex_lock_interruptible(notify_ducatidriver_state.
+ gate_handle) != 0);
+ proc_id = PROC_DUCATI;
+
+ tmp_status = notify_get_driver_handle(driver_name, &drv_handle);
+
+ if (tmp_status != NOTIFY_E_NOTFOUND) {
+ printk(KERN_ERR "Driver handle not found\n");
+ goto error_unlock_and_return;
+ }
+
+ /* Fill in information about driver attributes. */
+ /* This driver supports interaction with one other remote
+ * processor.*/
+
+ for (i = 0 ; i < MULTIPROC_MAXPROCESSORS; i++) {
+ /* Initialize all to invalid. */
+ drv_attrs.proc_info[i].proc_id = (u16)0xFFFF;
+ }
+
+ /*FIXME: Hack to allow SYSM3 and APPM3 events. Re-visit later */
+ if (params->remote_proc_id >= PROCSYSM3) {
+ for (i = PROCSYSM3; i <= PROCAPPM3; i++) {
+ drv_attrs.numProc = 1;
+ drv_attrs.proc_info[i].max_events = params->num_events;
+ drv_attrs.proc_info[i].reserved_events =
+ params->num_reserved_events;
+ /* Events are prioritized. */
+ drv_attrs.proc_info[i].event_priority = true;
+ /* 32-bit payload supported. */
+ drv_attrs.proc_info[i].payload_size = sizeof(int);
+ drv_attrs.proc_info[i].proc_id = i;
+ }
+ }
+
+ /* Function table information */
+ fxn_table.register_event = (void *)&notify_ducatidrv_register_event;
+ fxn_table.unregister_event = (void *)&notify_ducatidrv_unregister_event;
+ fxn_table.send_event = (void *)&notify_ducatidrv_sendevent;
+ fxn_table.disable = (void *)&notify_ducatidrv_disable;
+ fxn_table.restore = (void *)&notify_ducatidrv_restore;
+ fxn_table.disable_event = (void *)&notify_ducatidrv_disable_event;
+ fxn_table.enable_event = (void *)&notify_ducatidrv_enable_event;
+
+ /* Register driver with the Notify module. */
+ status = notify_register_driver(driver_name,
+ &fxn_table,
+ &drv_attrs,
+ &drv_handle);
+ /*FIXME: To take care of already exists case */
+ if ((status != NOTIFY_SUCCESS) && (status != NOTIFY_E_ALREADYEXISTS)) {
+ printk(KERN_ERR "Notify register failed\n");
+ goto error_clean_and_exit;
+ }
+ /* Allocate memory for the notify_ducatidrv_object object. */
+ drv_handle->driver_object = driver_obj =
+ kmalloc(sizeof(struct notify_ducatidrv_object),
+ GFP_ATOMIC);
+
+ if (driver_obj == NULL) {
+ status = -ENOMEM;
+ goto error_clean_and_exit;
+ } else {
+ memcpy(&(driver_obj->params), (void *) params,
+ sizeof(struct notify_ducatidrv_params));
+ }
+
+ if (params->remote_proc_id > multiproc_get_id(NULL)) {
+ driver_obj->self_id = SELF_ID;
+ driver_obj->other_id = OTHER_ID;
+ } else {
+ driver_obj->self_id = OTHER_ID;
+ driver_obj->other_id = SELF_ID;
+ }
+
+ shm_va = get_ducati_virt_mem();
+ driver_obj->ctrl_ptr = (struct notify_shmdrv_ctrl *) shm_va;
+ ctrl_ptr = &(driver_obj->ctrl_ptr->
+ proc_ctrl[driver_obj->self_id]);
+ ctrl_ptr->self_event_chart =
+ (struct notify_shmdrv_event_entry *)
+ ((int)(driver_obj->ctrl_ptr)
+ + sizeof(struct notify_shmdrv_ctrl)+
+ (sizeof(struct
+ notify_shmdrv_event_entry)
+ * params->num_events
+ * driver_obj->other_id));
+
+ ctrl_ptr->other_event_chart =
+ (struct notify_shmdrv_event_entry *)
+ ((int)(driver_obj->ctrl_ptr)
+ + sizeof(struct notify_shmdrv_ctrl) +
+ (sizeof(struct
+ notify_shmdrv_event_entry)
+ * params->num_events
+ * driver_obj->self_id));
+ driver_obj->proc_id = params->remote_proc_id;
+ driver_obj->event_list = kmalloc(
+ (sizeof(struct notify_drv_eventlist)
+ * params->num_events), GFP_ATOMIC);
+ if (driver_obj->event_list == NULL) {
+ status = -ENOMEM;
+ goto error_clean_and_exit;
+ } else {
+ memset(driver_obj->event_list, 0,
+ sizeof(struct notify_drv_eventlist)*params->
+ num_events);
+ }
+
+ driver_obj->reg_chart = kmalloc(sizeof(
+ struct notify_shmdrv_eventreg)
+ *params->num_events,
+ GFP_ATOMIC);
+ if (driver_obj->reg_chart == NULL) {
+ status = -ENOMEM;
+ goto error_clean_and_exit;
+ } else {
+ memset(driver_obj->reg_chart, 0,
+ sizeof(struct notify_shmdrv_eventreg)
+ *params->num_events);
+ }
+
+ event_list = driver_obj->event_list;
+
+ for (i = 0 ; (i < params->num_events) ; i++) {
+ ctrl_ptr->self_event_chart[i].flag = 0;
+ driver_obj->reg_chart[i].reg_event_no = (int) -1;
+ event_list[i].event_handler_count = 0;
+ INIT_LIST_HEAD(&event_list[i].listeners);
+ }
+
+ /*Set up the ISR on the Modena-ducati FIFO */
+ /* Add the driver handle to list */
+ drv_handle_inst = kmalloc(sizeof
+ (struct notify_driver_object_list), GFP_ATOMIC);
+ if (drv_handle_inst == NULL) {
+ status = -ENOMEM;
+ goto error_clean_and_exit;
+ }
+
+ drv_handle_inst->drv_handle = drv_handle;
+ list_add_tail(&(drv_handle_inst->elem),
+ &(notify_ducatidriver_state.drv_handle_list));
+
+ driver_obj = drv_handle->driver_object;
+ ctrl_ptr->reg_mask.mask = 0x0;
+ ctrl_ptr->reg_mask.enable_mask = 0xFFFFFFFF;
+ ctrl_ptr->recv_init_status = NOTIFYSHMDRV_INIT_STAMP;
+ ctrl_ptr->send_init_status = NOTIFYSHMDRV_INIT_STAMP;
+ drv_handle->is_init = NOTIFY_DRIVERINITSTATUS_DONE;
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+ omap_mbox_enable_irq(ducati_mbox, IRQ_RX);
+
+ /* Done with initialization. goto function end */
+ goto func_end;
+
+error_clean_and_exit:
+ if (drv_handle != NULL) {
+ /* Unregister driver from the Notify module*/
+ notify_unregister_driver(drv_handle);
+ if (ctrl_ptr != NULL) {
+ /* Clear initialization status in
+ shared memory. */
+ ctrl_ptr->recv_init_status = 0x0;
+ ctrl_ptr->send_init_status = 0x0;
+ ctrl_ptr = NULL;
+ }
+ /* Check if driverObj was allocated. */
+ if (driver_obj != NULL) {
+ /* Check if event List was allocated. */
+ if (driver_obj->event_list != NULL) {
+ /* Check if lists were
+ created. */
+ for (i = 0 ;
+ i < params->num_events ; i++) {
+ list_del(
+ (struct list_head *)
+ &driver_obj->
+ event_list[i].
+ listeners);
+ }
+ kfree(driver_obj->event_list);
+ driver_obj->event_list = NULL;
+ }
+ /* Check if regChart was allocated. */
+ if (driver_obj->reg_chart != NULL) {
+ kfree(driver_obj->reg_chart);
+ driver_obj->reg_chart
+ = NULL;
+ }
+ kfree(driver_obj);
+ }
+ drv_handle->is_init =
+ NOTIFY_DRIVERINITSTATUS_NOTDONE;
+ drv_handle = NULL;
+ }
+
+error_unlock_and_return:
+ /* Leave critical section protection. */
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+func_end:
+ return drv_handle;
+}
+EXPORT_SYMBOL(notify_ducatidrv_create);
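+
+/*
+ * Creation sketch (illustrative; the driver name string is an assumption
+ * and error handling is omitted):
+ *
+ *    struct notify_ducatidrv_params params;
+ *    struct notify_driver_object *drv_handle;
+ *
+ *    notify_ducatidrv_params_init(NULL, &params);
+ *    params.remote_proc_id = PROC_DUCATI;
+ *    drv_handle = notify_ducatidrv_create("NOTIFYDRIVER_DUCATI", &params);
+ *    ...
+ *    notify_ducatidrv_delete(&drv_handle);
+ */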
+
+/*
+ * brief Function to delete the instance of shared memory driver
+ *
+ */
+int notify_ducatidrv_delete(struct notify_driver_object **handle_ptr)
+{
+ int status = 0;
+ struct notify_driver_object *drv_handle = NULL;
+ struct notify_ducatidrv_object *driver_obj = NULL;
+ struct notify_drv_eventlist *event_list;
+ short int i = 0;
+ struct list_head *elem = NULL;
+ struct notify_driver_object_list *drv_list_entry = NULL;
+ int proc_id;
+
+ if (atomic_cmpmask_and_lt(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(0),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(1))
+ == true) {
+ /* FIXME: a not-initialized error code should be returned */
+ return -1;
+ }
+
+ WARN_ON(handle_ptr == NULL);
+ if (handle_ptr == NULL)
+ return -1;
+
+ drv_handle = (*handle_ptr);
+ WARN_ON(drv_handle == NULL);
+ if (drv_handle == NULL)
+ return -1;
+
+ driver_obj = (struct notify_ducatidrv_object *)
+ (*handle_ptr)->driver_object;
+ WARN_ON((*handle_ptr)->driver_object == NULL);
+
+ /*Uninstall the ISRs & Disable the Mailbox interrupt.*/
+
+ if (drv_handle != NULL) {
+ list_for_each(elem,
+ &notify_ducatidriver_state.drv_handle_list) {
+ drv_list_entry = container_of(elem,
+ struct notify_driver_object_list, elem);
+ if (drv_list_entry->drv_handle == drv_handle) {
+ list_del(elem);
+ kfree(drv_list_entry);
+ status = notify_unregister_driver(drv_handle);
+ drv_handle = NULL;
+ break;
+ }
+ }
+ }
+
+ if (status != NOTIFY_SUCCESS)
+ printk(KERN_WARNING "driver is not registerd\n");
+
+ if (driver_obj != NULL) {
+ if (driver_obj->ctrl_ptr != NULL) {
+ /* Clear initialization status in shared memory. */
+ driver_obj->ctrl_ptr->proc_ctrl[driver_obj->self_id].
+ recv_init_status = 0x0;
+ driver_obj->ctrl_ptr->proc_ctrl[driver_obj->self_id].
+ send_init_status = 0x0;
+ unmap_ducati_virt_mem((u32)(driver_obj->ctrl_ptr));
+ driver_obj->ctrl_ptr = NULL;
+ }
+
+ event_list = driver_obj->event_list;
+ if (event_list != NULL) {
+ /* Check if lists were created. */
+ for (i = 0 ; i < driver_obj->params.num_events ; i++) {
+ WARN_ON(event_list[i].event_handler_count != 0);
+ event_list[i].event_handler_count = 0;
+ list_del((struct list_head *)
+ &event_list[i].listeners);
+ }
+
+ kfree(event_list);
+ driver_obj->event_list = NULL;
+ }
+
+ /* Check if regChart was allocated. */
+ if (driver_obj->reg_chart != NULL) {
+ kfree(driver_obj->reg_chart);
+ driver_obj->reg_chart = NULL;
+ }
+
+ /* Disable the interrupt, Uninstall the ISR and delete it. */
+ /* Check if ISR was created. */
+ /*Remove the ISR on the Modena-ducati FIFO */
+ proc_id = PROC_DUCATI;
+
+ omap_mbox_disable_irq(ducati_mbox, IRQ_RX);
+
+ kfree(driver_obj);
+ driver_obj = NULL;
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_ducatidrv_delete);
+
+
+/*
+ * brief Destroy the notify_ducatidrv module.
+ *
+ */
+int notify_ducatidrv_destroy(void)
+{
+ int status = 0;
+ struct list_head *handle_list = NULL;
+ struct notify_driver_object_list *entry_list = NULL;
+ struct notify_driver_object *drv_handle = NULL;
+ struct list_head *entry;
+
+ if (atomic_cmpmask_and_lt(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(0),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(1))
+ == true)
+ /* FIXME: an invalid-state error code should be returned. */
+ return -1;
+
+ if (atomic_dec_return(&(notify_ducatidriver_state.ref_count)) ==
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(0)) {
+
+ /* Temporarily increment the refcount */
+ atomic_set(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(1));
+ handle_list = &(notify_ducatidriver_state.drv_handle_list);
+
+ list_for_each(entry, handle_list) {
+ entry_list = (struct notify_driver_object_list *)
+ container_of(entry,
+ struct notify_driver_object_list, elem);
+ drv_handle = entry_list->drv_handle;
+ notify_ducatidrv_delete(&drv_handle);
+ }
+
+ /* Check if the gate_handle was created internally. */
+
+ if (notify_ducatidriver_state.gate_handle != NULL)
+ kfree(notify_ducatidriver_state.gate_handle);
+
+ atomic_set(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(0));
+
+ omap_mbox_put(ducati_mbox);
+ ducati_mbox = NULL;
+ }
+
+
+ return status;
+}
+EXPORT_SYMBOL(notify_ducatidrv_destroy);
+
+
+
+/*
+ * brief Setup the notify_ducatidrv module.
+ *
+ * This function sets up the notify_ducatidrv module. This function
+ * must be called before any other instance-level APIs can be
+ * invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then notify_ducatidrv_getconfig can be called to get
+ * the configuration filled with the default values. After this,
+ * only the required configuration values can be changed. If the
+ * user does not wish to make any change in the default parameters,
+ * the application can simply call notify_ducatidrv_setup with NULL
+ * parameters. The default parameters would get automatically used.
+ */
+int notify_ducatidrv_setup(struct notify_ducatidrv_config *cfg)
+{
+ int status = 0;
+ struct notify_ducatidrv_config tmp_cfg;
+
+ if (cfg == NULL) {
+ notify_ducatidrv_getconfig(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ /* Init the ref_count to 0 */
+ atomic_cmpmask_and_set(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(0),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&(notify_ducatidriver_state.ref_count)) !=
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(1u)) {
+ return 1;
+ }
+
+
+ /* Create a default gate handle here */
+ notify_ducatidriver_state.gate_handle =
+ kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (notify_ducatidriver_state.gate_handle == NULL) {
+ atomic_set(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDRIVERSHM_MAKE_MAGICSTAMP(0));
+ status = -ENOMEM;
+ goto error_exit;
+ }
+ mutex_init(notify_ducatidriver_state.gate_handle);
+
+ memcpy(&notify_ducatidriver_state.cfg,
+ cfg, sizeof(struct notify_ducatidrv_config));
+
+ INIT_LIST_HEAD(&(notify_ducatidriver_state.
+ drv_handle_list));
+ /* Initialize the mailbox module for ducati */
+ if (ducati_mbox == NULL) {
+ ducati_mbox = omap_mbox_get("mailbox-2");
+ if (ducati_mbox == NULL) {
+ printk(KERN_ERR "Failed in omap_mbox_get()\n");
+ status = -ENODEV;
+ goto error_mailbox_get_failed;
+ }
+ ducati_mbox->rxq->callback =
+ (int (*)(void *))notify_ducatidrv_isr;
+ }
+
+ return 0;
+
+error_mailbox_get_failed:
+ kfree(notify_ducatidriver_state.gate_handle);
+error_exit:
+ return status;
+}
+EXPORT_SYMBOL(notify_ducatidrv_setup);
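+
+/*
+ * Module bring-up sketch (illustrative; only the fields that need to
+ * differ from the defaults have to be touched):
+ *
+ *    struct notify_ducatidrv_config cfg;
+ *
+ *    notify_ducatidrv_getconfig(&cfg);
+ *    ... adjust cfg if required ...
+ *    notify_ducatidrv_setup(&cfg);
+ *    ...
+ *    notify_ducatidrv_destroy();
+ *
+ * Passing NULL to notify_ducatidrv_setup is equivalent to using the
+ * defaults returned by notify_ducatidrv_getconfig.
+ */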
+
+
+/*
+* brief register a callback for an event with the Notify driver.
+*
+*/
+int notify_ducatidrv_register_event(
+ struct notify_driver_object *handle,
+ short int proc_id,
+ int event_no,
+ fn_notify_cbck fn_notify_cbck,
+ void *cbck_arg)
+{
+ int status = 0;
+ int first_reg = false;
+ bool done = true;
+ struct notify_drv_eventlistner *event_listener;
+ struct notify_drv_eventlist *event_list;
+ struct notify_ducatidrv_object *driver_object;
+ struct notify_shmdrv_eventreg *reg_chart;
+ VOLATILE struct notify_shmdrv_ctrl *ctrl_ptr;
+ VOLATILE struct notify_shmdrv_event_entry *self_event_chart;
+ int i;
+ int j;
+ BUG_ON(handle == NULL);
+ BUG_ON(handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE);
+ BUG_ON(handle->driver_object == NULL);
+ BUG_ON(fn_notify_cbck == NULL);
+
+
+ driver_object = (struct notify_ducatidrv_object *)
+ handle->driver_object;
+
+ ctrl_ptr = driver_object->ctrl_ptr;
+ /* Allocate memory for event listener. */
+ event_listener = kmalloc(sizeof(struct notify_drv_eventlistner),
+ GFP_ATOMIC);
+
+ if (event_listener == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ } else {
+ memset(event_listener, 0,
+ sizeof(struct notify_drv_eventlistner));
+ }
+
+ if (mutex_lock_interruptible(notify_ducatidriver_state.gate_handle)
+ != 0)
+ WARN_ON(1);
+
+ event_list = driver_object->event_list;
+ WARN_ON(event_list == NULL);
+ event_listener->fn_notify_cbck = fn_notify_cbck;
+ event_listener->cbck_arg = cbck_arg;
+ /* Check if this is the first registration for this event. */
+
+ if (list_empty((struct list_head *)
+ &event_list[event_no].listeners)) {
+ first_reg = true;
+ self_event_chart = ctrl_ptr->proc_ctrl[driver_object->self_id].
+ self_event_chart;
+ /* Clear any pending unserviced event as there are no listeners
+ * for the pending event
+ */
+ self_event_chart[event_no].flag = DOWN;
+ }
+
+ list_add_tail((struct list_head *)
+ &(event_listener->element),
+ (struct list_head *)
+ &event_list[event_no].listeners);
+ event_list[event_no].event_handler_count++;
+
+
+ if (first_reg == true) {
+ reg_chart = driver_object->reg_chart;
+ for (i = 0 ; i < driver_object->params.num_events ; i++) {
+ /* Find the correct slot in the registration array. */
+ if (reg_chart[i].reg_event_no == (int) -1) {
+ for (j = (i - 1); j >= 0; j--) {
+ if (event_no < reg_chart[j].
+ reg_event_no) {
+ reg_chart[j + 1].reg_event_no =
+ reg_chart[j].
+ reg_event_no;
+ reg_chart[j + 1].reserved =
+ reg_chart[j].reserved;
+ i = j;
+ } else {
+ /* End the loop, slot found. */
+ j = -1;
+ }
+ }
+ reg_chart[i].reg_event_no = event_no;
+ done = true;
+ break;
+ }
+ }
+
+ if (done) {
+ set_bit(event_no, (unsigned long *)
+ &ctrl_ptr->proc_ctrl[driver_object->self_id].
+ reg_mask.mask);
+ } else {
+ /* retval NOTIFY_E_MAXEVENTS Maximum number of
+ supported events has already been registered. */
+ status = -EINVAL;
+ kfree(event_listener);
+
+ }
+ }
+func_end:
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+ return status;
+}
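+
+/*
+ * Registration sketch (illustrative; the listener prototype below mirrors
+ * the way callbacks are invoked from the ISR path in this file, while the
+ * exact fn_notify_cbck typedef lives in the notify headers):
+ *
+ *    void my_cbck(short int proc_id, int event_no, void *arg, int payload)
+ *    {
+ *        ... handle the event ...
+ *    }
+ *
+ *    notify_ducatidrv_register_event(drv_handle, PROC_DUCATI, event_no,
+ *        (fn_notify_cbck) my_cbck, arg);
+ *    ...
+ *    notify_ducatidrv_unregister_event(drv_handle, PROC_DUCATI, event_no,
+ *        (fn_notify_cbck) my_cbck, arg);
+ */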
+
+
+/*
+*
+* brief Unregister a callback for an event with the Notify driver.
+*
+*/
+
+int notify_ducatidrv_unregister_event(
+ struct notify_driver_object *handle,
+ short int proc_id,
+ int event_no,
+ fn_notify_cbck fn_notify_cbck,
+ void *cbck_arg)
+{
+ int status = 0;
+ struct notify_drv_eventlistner *listener = NULL;
+ int num_events;
+ struct notify_ducatidrv_object *driver_object;
+ struct notify_drv_eventlist *event_list;
+ struct notify_shmdrv_eventreg *reg_chart;
+ VOLATILE struct notify_shmdrv_ctrl *ctrl_ptr = NULL;
+ struct notify_drv_eventlistner unreg_info;
+ VOLATILE struct notify_shmdrv_event_entry *self_event_chart;
+ int i;
+ int j;
+
+ BUG_ON(fn_notify_cbck == NULL);
+ BUG_ON(handle == NULL);
+ BUG_ON(handle->driver_object == NULL);
+
+ driver_object = (struct notify_ducatidrv_object *)
+ handle->driver_object;
+ num_events = driver_object->params.num_events;
+
+ ctrl_ptr = driver_object->ctrl_ptr;
+
+ /* Enter critical section protection. */
+ if (mutex_lock_interruptible(notify_ducatidriver_state.gate_handle)
+ != 0)
+ WARN_ON(1);
+
+ event_list = driver_object->event_list;
+
+
+ unreg_info.fn_notify_cbck = fn_notify_cbck;
+ unreg_info.cbck_arg = cbck_arg;
+ notify_ducatidrv_qsearch_elem(&event_list[event_no].listeners,
+ &unreg_info,
+ &listener);
+ if (listener == NULL) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ list_del((struct list_head *)&(listener->element));
+ kfree(listener);
+ event_list[event_no].event_handler_count--;
+
+
+ if (list_empty((struct list_head *)
+ &event_list[event_no].listeners) == true) {
+ clear_bit(event_no, (unsigned long *)
+ &ctrl_ptr->proc_ctrl[driver_object->self_id].reg_mask.
+ mask);
+ self_event_chart = ctrl_ptr->proc_ctrl[driver_object->self_id].
+ self_event_chart;
+ /* Clear any pending unserviced event as there are no
+ * listeners for the pending event
+ */
+ self_event_chart[event_no].flag = DOWN;
+ reg_chart = driver_object->reg_chart;
+ for (i = 0; i < num_events; i++) {
+ /* Find the correct slot in the registration array. */
+ if (event_no == reg_chart[i].reg_event_no) {
+ reg_chart[i].reg_event_no = (int) -1;
+ for (j = (i + 1);
+ (reg_chart[j].reg_event_no != (int) -1)
+ && (j != num_events); j++) {
+ reg_chart[j - 1].reg_event_no =
+ reg_chart[j].reg_event_no;
+ reg_chart[j - 1].reserved =
+ reg_chart[j].reserved;
+ }
+
+ if (j == num_events) {
+ reg_chart[j - 1].reg_event_no =
+ (int) -1;
+ }
+ break;
+ }
+ }
+ }
+
+
+
+func_end:
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+ return status;
+}
+
+/*
+* brief Send a notification event to the registered users for this
+* notification on the specified processor.
+*
+*/
+int notify_ducatidrv_sendevent(struct notify_driver_object *handle,
+ short int proc_id, int event_no,
+ int payload, short int wait_clear)
+{
+ int status = 0;
+ struct notify_ducatidrv_object *driver_object;
+ VOLATILE struct notify_shmdrv_ctrl *ctrl_ptr;
+ int max_poll_count;
+ VOLATILE struct notify_shmdrv_event_entry *other_event_chart;
+
+ int i = 0;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(handle->driver_object == NULL);
+
+ dsb();
+ driver_object = (struct notify_ducatidrv_object *)
+ handle->driver_object;
+
+ BUG_ON(event_no > driver_object->params.num_events);
+
+ ctrl_ptr = driver_object->ctrl_ptr;
+ other_event_chart = ctrl_ptr->proc_ctrl[driver_object->self_id].
+ other_event_chart;
+ max_poll_count = driver_object->params.send_event_poll_count;
+
+ /* Check whether driver supports interrupts from this processor to the
+ other processor, and if it is initialized
+ */
+ if (ctrl_ptr->proc_ctrl[driver_object->other_id].recv_init_status
+ != NOTIFYSHMDRV_INIT_STAMP) {
+ status = -ENODEV;
+ /* This may be used for polling till other-side
+ driver is ready, so do not set failure reason.
+ */
+ } else {
+ /* Check if other side is ready to receive this event. */
+ if ((test_bit(event_no, (unsigned long *)
+ &ctrl_ptr->proc_ctrl[driver_object->other_id].
+ reg_mask.mask) != 1)
+ || (test_bit(event_no, &ctrl_ptr->
+ proc_ctrl[driver_object->other_id].reg_mask.
+ enable_mask) != 1)) {
+ status = -ENODEV;
+ printk(KERN_ERR "NOTIFY DRV: OTHER SIDE NOT READY TO"
+ "RECEIVE. %d\n", event_no);
+ /* This may be used for polling till other-side
+ is ready, so do not set failure reason.*/
+ } else {
+ dsb();
+ /* Enter critical section protection. */
+ if (mutex_lock_interruptible(notify_ducatidriver_state.
+ gate_handle) != 0)
+ WARN_ON(1);
+ if (wait_clear == true) {
+ /* Wait for completion of previous
+ event from the other side */
+ while ((other_event_chart[event_no].flag
+ != DOWN)
+ && status == 0) {
+ /* Leave critical section protection
+ Create a window of opportunity
+ for other interrupts to be handled.
+ */
+ mutex_unlock(notify_ducatidriver_state.
+ gate_handle);
+ i++;
+ if ((max_poll_count != (int) -1)
+ && (i == max_poll_count)) {
+ status = -EBUSY;
+ }
+ /* Enter critical section protection. */
+ if (mutex_lock_interruptible(
+ notify_ducatidriver_state.
+ gate_handle) != 0)
+ WARN_ON(1);
+ }
+ }
+ if (status >= 0) {
+ /* Set the event bit field and payload. */
+ other_event_chart[event_no].payload = payload;
+ other_event_chart[event_no].flag = UP;
+ /* Send an interrupt with the event
+ information to the remote processor */
+ status = omap_mbox_msg_send(ducati_mbox,
+ payload);
+ }
+ /* Leave critical section protection. */
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+ }
+ }
+
+ return status;
+}
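+
+/*
+ * Send sketch (illustrative): post 'event_no' with a 32-bit payload and,
+ * because wait_clear is true, poll (bounded by send_event_poll_count)
+ * until the previous event has been consumed by the remote side:
+ *
+ *    status = notify_ducatidrv_sendevent(drv_handle, PROC_DUCATI,
+ *        event_no, payload, true);
+ *
+ * A return value of -ENODEV indicates that the remote side has not yet
+ * registered or enabled the event, so the caller may retry later.
+ */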
+
+/*
+* brief Disable all events for this Notify driver.
+*
+*/
+void *notify_ducatidrv_disable(struct notify_driver_object *handle)
+{
+
+ omap_mbox_disable_irq(ducati_mbox, IRQ_RX);
+
+ return NULL; /*No flags to be returned. */
+}
+
+/*
+* brief Restore the notify_ducatidrv to the state before the
+* last disable was called.
+*
+*/
+int notify_ducatidrv_restore(struct notify_driver_object *handle,
+ void *flags)
+{
+ (void) handle;
+ (void) flags;
+ /*Enable the receive interrupt for ducati */
+ omap_mbox_enable_irq(ducati_mbox, IRQ_RX);
+ return 0;
+}
+
+/*
+* brief Disable a specific event for this Notify ducati driver
+*
+*/
+int notify_ducatidrv_disable_event(
+ struct notify_driver_object *handle,
+ short int proc_id, int event_no)
+{
+ static int access_count ;
+ signed long int status = 0;
+ struct notify_ducatidrv_object *driver_object;
+ BUG_ON(handle == NULL);
+ BUG_ON(handle->driver_object == NULL);
+ access_count++;
+ driver_object = (struct notify_ducatidrv_object *)
+ handle->driver_object;
+ /* Enter critical section protection. */
+
+ if (mutex_lock_interruptible(notify_ducatidriver_state.
+ gate_handle) != 0)
+ WARN_ON(1);
+
+ clear_bit(event_no, (unsigned long *)
+ &driver_object->ctrl_ptr->proc_ctrl[driver_object->self_id].
+ reg_mask.enable_mask);
+ /* Leave critical section protection. */
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+ return status;
+}
+
+/*
+* brief Enable a specific event for this Notify ducati driver
+*
+*/
+int notify_ducatidrv_enable_event(struct notify_driver_object *handle,
+ short int proc_id, int event_no)
+{
+ int status = 0;
+ struct notify_ducatidrv_object *driver_object;
+ BUG_ON(handle == NULL);
+ BUG_ON(handle->driver_object == NULL);
+
+ driver_object = (struct notify_ducatidrv_object *)
+ handle->driver_object;
+ /* Enter critical section protection. */
+
+ if (mutex_lock_interruptible(notify_ducatidriver_state.
+ gate_handle) != 0)
+ WARN_ON(1);
+
+ set_bit(event_no, (unsigned long *)
+ &driver_object->ctrl_ptr->proc_ctrl[driver_object->self_id].
+ reg_mask.enable_mask);
+
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+ return status;
+}
+
+/*
+* brief Print debug information for the Notify ducati driver
+*
+*/
+int notify_ducatidrv_debug(struct notify_driver_object *handle)
+{
+ int status = 0;
+ printk(KERN_WARNING "ducati Debug: Nothing being printed currently\n");
+ return status;
+}
+
+
+/*
+ *
+ * brief This function implements the interrupt service routine for the
+ * interrupt received from the Ducati processor.
+ *
+ */
+static void notify_ducatidrv_isr(void *ntfy_msg)
+{
+ struct notify_driver_object_list *obj_list;
+ struct list_head *entry = NULL;
+ list_for_each(entry, &(notify_ducatidriver_state.drv_handle_list)) {
+ obj_list = (struct notify_driver_object_list *)
+ container_of(entry,
+ struct notify_driver_object_list, elem);
+ notify_ducatidrv_isr_callback(
+ obj_list->drv_handle->driver_object, ntfy_msg);
+ }
+}
+EXPORT_SYMBOL(notify_ducatidrv_isr);
+
+
+static void notify_ducatidrv_isr_callback(void *ref_data, void* ntfy_msg)
+{
+ int payload = 0;
+ int i = 0;
+ struct list_head *temp;
+ int j = 0;
+ VOLATILE struct notify_shmdrv_event_entry *self_event_chart;
+ struct notify_ducatidrv_object *driver_obj;
+ struct notify_shmdrv_eventreg *reg_chart;
+ VOLATILE struct notify_shmdrv_proc_ctrl *proc_ctrl_ptr;
+ int event_no;
+ dsb();
+ /* Enter critical section protection. */
+
+ driver_obj = (struct notify_ducatidrv_object *) ref_data;
+ proc_ctrl_ptr = &(driver_obj->ctrl_ptr->proc_ctrl[driver_obj->self_id]);
+ reg_chart = driver_obj->reg_chart;
+ self_event_chart = proc_ctrl_ptr->self_event_chart;
+ dsb();
+ /* Execute the loop till no asserted event
+ is found for one complete loop
+ through all registered events
+ */
+ do {
+ /* Check if the entry is a valid registered event.*/
+ event_no = reg_chart[i].reg_event_no;
+ /* Determine the current high priority event.*/
+ /* Check if the event is set and enabled.*/
+ if (self_event_chart[event_no].flag == UP &&
+ test_bit(event_no,
+ (unsigned long *) &proc_ctrl_ptr->reg_mask.enable_mask)
+ && (event_no != (int) -1)) {
+
+ payload = self_event_chart[event_no].
+ payload;
+ dsb();
+ /* Acknowledge the event. */
+ payload = (int)ntfy_msg;
+ self_event_chart[event_no].flag = DOWN;
+ dsb();
+ /*Call the callbacks associated with the event*/
+ temp = driver_obj->
+ event_list[event_no].
+ listeners.next;
+ if (temp != NULL) {
+ for (j = 0; j < driver_obj->
+ event_list[event_no].
+ event_handler_count;
+ j++) {
+ /* Check for empty list. */
+ if (temp == NULL)
+ continue;
+ /*FIXME: Hack to support SYSM3 and
+ APPM3 */
+ if ((driver_obj->proc_id == PROCAPPM3)
+ && (event_no <= MAX_SUBPROC_EVENTS)) {
+ driver_obj->proc_id = PROCSYSM3;
+ }
+ if ((driver_obj->proc_id == PROCSYSM3)
+ && (event_no > MAX_SUBPROC_EVENTS)) {
+ driver_obj->proc_id = PROCAPPM3;
+ }
+ ((struct notify_drv_eventlistner *)
+ temp)->fn_notify_cbck(
+ driver_obj->proc_id,
+ event_no,
+ ((struct notify_drv_eventlistner *)
+ temp)->cbck_arg,
+ payload);
+ temp = temp->next;
+ }
+ /* reinitialize the event check counter. */
+ i = 0;
+ }
+ } else {
+ /* check for next event. */
+ i++;
+ }
+ } while ((event_no != (int) -1)
+ && (i < driver_obj->params.num_events));
+
+}
+
+/*
+ * brief This function searches for an element in the list.
+ */
+static void notify_ducatidrv_qsearch_elem(struct list_head *list,
+ struct notify_drv_eventlistner *check_obj,
+ struct notify_drv_eventlistner **listener)
+{
+ struct list_head *temp = NULL ;
+ struct notify_drv_eventlistner *l_temp = NULL ;
+ short int found = false;
+
+ BUG_ON(list == NULL);
+ BUG_ON(check_obj == NULL);
+ BUG_ON(listener == NULL);
+
+ *listener = NULL;
+ if ((list != NULL) && (check_obj != NULL)) {
+ if (list_empty((struct list_head *)list) == false) {
+ temp = list->next;
+ while ((found == false) && (temp != NULL)) {
+ l_temp =
+ (struct notify_drv_eventlistner *)
+ (temp);
+ if ((l_temp->fn_notify_cbck ==
+ check_obj->fn_notify_cbck) &&
+ (l_temp->cbck_arg ==
+ check_obj->cbck_arg)) {
+ found = true;
+ } else
+ temp = temp->next;
+ }
+ if (found == true)
+ *listener = l_temp;
+ }
+ }
+ return;
+}
diff --git a/drivers/dsp/syslink/omap_notify/Kbuild b/drivers/dsp/syslink/omap_notify/Kbuild
new file mode 100755
index 000000000000..c1fed9e32f51
--- /dev/null
+++ b/drivers/dsp/syslink/omap_notify/Kbuild
@@ -0,0 +1,19 @@
+libomap_notify = notify_driver.o notify.o drv_notify.o
+
+
+
+obj-$(CONFIG_MPU_BRIDGE_NOTIFY) += omap_notify.o
+omap_notify-objs = $(libomap_notify)
+
+
+ccflags-y += -Wno-strict-prototypes
+
+#Machine dependent
+ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
+ -DTICFG_PROC_VER -DTICFG_EVM_TYPE -DCHNL_SMCLASS \
+ -DCHNL_MESSAGES -DUSE_LEVEL_1_MACROS \
+ -DCONFIG_DISABLE_BRIDGE_PM -DDSP_TRACEBUF_DISABLED
+
+#Header files
+ccflags-y += -Iarch/arm/plat-omap/include
+
diff --git a/drivers/dsp/syslink/omap_notify/drv_notify.c b/drivers/dsp/syslink/omap_notify/drv_notify.c
new file mode 100755
index 000000000000..d6695df040b8
--- /dev/null
+++ b/drivers/dsp/syslink/omap_notify/drv_notify.c
@@ -0,0 +1,918 @@
+/*
+ * drv_notify.c
+ *
+ * Syslink support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <generated/autoconf.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <asm/pgtable.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+
+#include <syslink/gt.h>
+#include <syslink/notify_driver.h>
+#include <syslink/notify.h>
+#include <syslink/GlobalTypes.h>
+
+
+/** ============================================================================
+ * Macros and types
+ * ============================================================================
+ */
+#define IPCNOTIFY_NAME "ipcnotify"
+
+static char *driver_name = IPCNOTIFY_NAME;
+
+static s32 driver_major;
+
+static s32 driver_minor;
+
+struct ipcnotify_dev {
+ struct cdev cdev;
+};
+
+static struct ipcnotify_dev *ipcnotify_device;
+
+static struct class *ipcnotify_class;
+
+
+/*
+ * Maximum number of users supported.
+ */
+
+#define MAX_PROCESSES 32
+
+/* Structure of an event packet read from the notify kernel side. */
+struct notify_drv_event_packet {
+ struct list_head element;
+ u32 pid;
+ u32 proc_id;
+ u32 event_no;
+ u32 data;
+ notify_callback_fxn func;
+ void *param;
+ bool is_exit;
+};
+
+/* Structure of the event callback argument passed to the register function. */
+struct notify_drv_event_cbck {
+ struct list_head element;
+ u32 proc_id;
+ notify_callback_fxn func;
+ void *param;
+ u32 pid;
+};
+
+/* Keeps the information related to an event. */
+struct notifydrv_event_state {
+ struct list_head buf_list;
+ u32 pid;
+ u32 ref_count;
+	/* Reference count, used when multiple Notify_registerEvent calls are
+	 * made from the same process space (multiple threads/processes). */
+	struct semaphore *semhandle;
+	/* Semaphore for waiting on an event. */
+ struct semaphore *tersemhandle;
+ /* Termination synchronization semaphore. */
+};
+
+struct notifydrv_moduleobject{
+ bool is_setup;
+	/* Indicates whether the module has already been set up. */
+ int open_refcount;
+ /* Open reference count. */
+ struct mutex *gatehandle;
+ /*Handle of gate to be used for local thread safety */
+ struct list_head evt_cbck_list;
+	/* List containing callback arguments for all registered handlers
+	 * from user mode. */
+ struct notifydrv_event_state event_state[MAX_PROCESSES];
+ /* List for all user processes registered. */
+};
+
+struct notifydrv_moduleobject notifydrv_state = {
+ .is_setup = false,
+ .open_refcount = 0,
+ .gatehandle = NULL,
+};
+
+/*Major number of driver.*/
+int major = 232;
+
+static void notify_drv_setup(void);
+
+static int notify_drv_add_buf_by_pid(u16 procId, u32 pid,
+ u32 eventNo, u32 data, notify_callback_fxn cbFxn, void *param);
+
+/* Open the Notify driver object. */
+static int notify_drv_open(struct inode *inode, struct file *filp) ;
+
+/* Close the Notify driver object. */
+static int notify_drv_close(struct inode *inode, struct file *filp);
+
+/* Linux driver function to map memory regions to user space. */
+static int notify_drv_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* Read function for the Notify driver. */
+static int notify_drv_read(struct file *filp, char *dst,
+ size_t size, loff_t *offset);
+
+/* ioctl function for the Linux Notify driver. */
+static int notify_drv_ioctl(struct inode *inode, struct file *filp, u32 cmd,
+ unsigned long args);
+
+/* Module initialization function for Linux driver.*/
+static int __init notify_drv_init_module(void);
+
+/* Module finalization function for Linux driver.*/
+static void __exit notify_drv_finalize_module(void) ;
+
+static void notify_drv_destroy(void);
+
+static int notify_drv_register_driver(void);
+
+static int notify_drv_unregister_driver(void);
+
+/* Attach a process to notify user support framework. */
+static int notify_drv_attach(u32 pid);
+
+/* Detach a process from notify user support framework. */
+static int notify_drv_detach(u32 pid);
+
+
+/* Function to invoke the APIs through ioctl.*/
+static const struct file_operations driver_ops = {
+ .open = notify_drv_open,
+ .ioctl = notify_drv_ioctl,
+ .release = notify_drv_close,
+ .read = notify_drv_read,
+ .mmap = notify_drv_mmap,
+};
+
+static int notify_drv_register_driver(void)
+{
+ notify_drv_setup();
+ return 0;
+}
+
+static int notify_drv_unregister_driver(void)
+{
+ notify_drv_destroy();
+ return 0;
+}
+
+
+/*
+ * This function implements the callback registered with IPS. It passes the
+ * event number back to the user function (so that it can do another level
+ * of demultiplexing of callbacks).
+ */
+static void notify_drv_cbck(u16 proc_id, u32 event_no,
+ void *arg, u32 payload)
+{
+ struct notify_drv_event_cbck *cbck;
+
+ if (WARN_ON(notifydrv_state.is_setup == false))
+ goto func_end;
+ BUG_ON(arg == NULL);
+ cbck = (struct notify_drv_event_cbck *)arg;
+ notify_drv_add_buf_by_pid(proc_id, cbck->pid, event_no, payload,
+ cbck->func, cbck->param);
+func_end:
+ return;
+}
+
+/*
+ * Linux specific function to open the driver.
+ */
+static int notify_drv_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/*
+ * close the driver
+ */
+static int notify_drv_close(struct inode *inode, struct file *filp)
+{
+ return 0 ;
+}
+
+/*
+ * read data from the driver
+ */
+static int notify_drv_read(struct file *filp, char *dst, size_t size,
+ loff_t *offset)
+{
+
+ bool flag = false;
+ struct notify_drv_event_packet *u_buf = NULL;
+ int ret_val = 0;
+ u32 i;
+ struct list_head *elem;
+ struct notify_drv_event_packet t_buf;
+ if (WARN_ON(notifydrv_state.is_setup == false)) {
+ ret_val = -EFAULT;
+ goto func_end;
+ }
+
+ ret_val = copy_from_user((void *)&t_buf,
+ (void *)dst,
+ sizeof(struct notify_drv_event_packet));
+ WARN_ON(ret_val != 0);
+
+
+ for (i = 0 ; i < MAX_PROCESSES ; i++) {
+ if (notifydrv_state.event_state[i].pid == t_buf.pid) {
+ flag = true;
+ break;
+ }
+ }
+ if (flag == false) {
+ ret_val = -EFAULT;
+ goto func_end;
+ }
+ /* Wait for the event */
+ ret_val = down_interruptible(
+ notifydrv_state.event_state[i].semhandle);
+ if (ret_val < 0) {
+ ret_val = -ERESTARTSYS;
+ goto func_end;
+ }
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gatehandle));
+ elem = ((struct list_head *)&(notifydrv_state.event_state[i]. \
+ buf_list))->next;
+ u_buf = container_of(elem, struct notify_drv_event_packet,
+ element);
+ list_del(elem);
+ mutex_unlock(notifydrv_state.gatehandle);
+ if (u_buf == NULL) {
+ ret_val = -EFAULT;
+ goto func_end;
+ }
+ ret_val = copy_to_user((void *)dst, u_buf,
+ sizeof(struct notify_drv_event_packet));
+
+ if (WARN_ON(ret_val != 0))
+ ret_val = -EFAULT;
+ if (u_buf->is_exit == true)
+ up(notifydrv_state.event_state[i].tersemhandle);
+
+ kfree(u_buf);
+ u_buf = NULL;
+
+
+func_end:
+ return ret_val ;
+}
+
+static int notify_drv_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+ return -EAGAIN;
+ }
+ return 0;
+}
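+
+/*
+ * Illustrative sketch (not part of the driver): how a user-space client
+ * might use the mmap interface above. The device node name follows the
+ * class/device created in notify_drv_init_module(); the length and physical
+ * address values are placeholders, and the offset is assumed to be the
+ * physical address of the region to map (vm_pgoff is used directly as the
+ * PFN by notify_drv_mmap()).
+ *
+ *	int fd = open("/dev/ipcnotify", O_RDWR);
+ *	void *va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *			fd, (off_t) shared_region_phys);
+ */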
+
+/*
+ * name notify_drv_ioctl
+ *
+ * ioctl function for the Linux Notify driver.
+ *
+ */
+static int notify_drv_ioctl(struct inode *inode, struct file *filp, u32 cmd,
+ unsigned long args)
+{
+ int retval = 0;
+ int status = NOTIFY_SUCCESS;
+ struct notify_cmd_args *cmdArgs = (struct notify_cmd_args *)args;
+ struct notify_cmd_args commonArgs;
+
+ switch (cmd) {
+ case CMD_NOTIFY_GETCONFIG:
+ {
+ struct notify_cmd_args_get_config *src_args =
+ (struct notify_cmd_args_get_config *)args;
+ struct notify_config cfg;
+ notify_get_config(&cfg);
+ retval = copy_to_user((void *) (src_args->cfg),
+ (const void *) &cfg, sizeof(struct notify_config));
+ }
+ break;
+
+ case CMD_NOTIFY_SETUP:
+ {
+ struct notify_cmd_args_setup *src_args =
+ (struct notify_cmd_args_setup *) args;
+ struct notify_config cfg;
+
+ retval = copy_from_user((void *) &cfg,
+ (const void *) (src_args->cfg),
+ sizeof(struct notify_config));
+ if (WARN_ON(retval != 0))
+ goto func_end;
+ notify_setup(&cfg);
+ }
+ break;
+
+ case CMD_NOTIFY_DESTROY:
+ {
+		/* copy_from_user is not needed for notify_destroy, since no
+		 * user-side arguments are used.
+		 */
+ status = notify_destroy();
+ }
+ break;
+
+ case CMD_NOTIFY_REGISTEREVENT:
+ {
+ struct notify_cmd_args_register_event src_args;
+ struct notify_drv_event_cbck *cbck = NULL;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *) &src_args,
+ (const void *) (args),
+ sizeof(struct notify_cmd_args_register_event));
+
+ if (WARN_ON(retval != 0))
+ goto func_end;
+ cbck = kmalloc(sizeof(struct notify_drv_event_cbck),
+ GFP_ATOMIC);
+ WARN_ON(cbck == NULL);
+ cbck->proc_id = src_args.procId;
+ cbck->func = src_args.fnNotifyCbck;
+ cbck->param = src_args.cbckArg;
+ cbck->pid = src_args.pid;
+ status = notify_register_event(src_args.handle, src_args.procId,
+ src_args.eventNo, notify_drv_cbck, (void *)cbck);
+ if (status < 0) {
+ /* This does not impact return status of this function,
+ * so retval comment is not used.
+ */
+ kfree(cbck);
+ } else {
+ WARN_ON(mutex_lock_interruptible
+ (notifydrv_state.gatehandle));
+ INIT_LIST_HEAD((struct list_head *)&(cbck->element));
+ list_add_tail(&(cbck->element),
+ &(notifydrv_state.evt_cbck_list));
+ mutex_unlock(notifydrv_state.gatehandle);
+ }
+ }
+ break;
+
+ case CMD_NOTIFY_UNREGISTEREVENT:
+ {
+ bool found = false;
+ u32 pid;
+ struct notify_drv_event_cbck *cbck = NULL;
+ struct list_head *entry = NULL;
+ struct notify_cmd_args_unregister_event src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args, (const void *)(args),
+ sizeof(struct notify_cmd_args_unregister_event));
+
+ if (WARN_ON(retval != 0))
+ goto func_end;
+
+ pid = src_args.pid;
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gatehandle));
+ list_for_each(entry,
+ (struct list_head *)&(notifydrv_state.evt_cbck_list)) {
+ cbck = (struct notify_drv_event_cbck *)(entry);
+ if ((cbck->func == src_args.fnNotifyCbck) &&
+ (cbck->param == src_args.cbckArg) &&
+ (cbck->pid == pid) &&
+ (cbck->proc_id == src_args.procId)) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(notifydrv_state.gatehandle);
+ if (found == false) {
+ status = NOTIFY_E_NOTFOUND;
+ goto func_end;
+ }
+ status = notify_unregister_event(src_args.handle,
+ src_args.procId,
+ src_args.eventNo,
+ notify_drv_cbck, (void *) cbck);
+ /* This check is needed at run-time also to propagate the
+ * status to user-side. This must not be optimized out.
+ */
+ if (status < 0)
+ printk(KERN_ERR " notify_unregister_event failed \n");
+ else {
+ WARN_ON(mutex_lock_interruptible
+ (notifydrv_state.gatehandle));
+ list_del((struct list_head *)cbck);
+ mutex_unlock(notifydrv_state.gatehandle);
+ kfree(cbck);
+ }
+ }
+ break;
+
+ case CMD_NOTIFY_SENDEVENT:
+ {
+ struct notify_cmd_args_send_event src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *) &src_args,
+ (const void *) (args),
+ sizeof(struct notify_cmd_args_send_event));
+ if (WARN_ON(retval != 0)) {
+ retval = -EFAULT;
+ goto func_end;
+ }
+ status = notify_sendevent(src_args.handle, src_args.procId,
+ src_args.eventNo, src_args.payload,
+ src_args.waitClear);
+ }
+ break;
+
+ case CMD_NOTIFY_DISABLE:
+ {
+ struct notify_cmd_args_disable src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *) &src_args,
+ (const void *) (args),
+ sizeof(struct notify_cmd_args_disable));
+
+ /* This check is needed at run-time also since it depends on
+ * run environment. It must not be optimized out.
+ */
+ if (WARN_ON(retval != 0)) {
+ retval = -EFAULT;
+ goto func_end;
+ }
+ src_args.flags = notify_disable(src_args.procId);
+
+ /* Copy the full args to user-side */
+ retval = copy_to_user((void *) (args),
+ (const void *) &src_args,
+ sizeof(struct notify_cmd_args_disable));
+ /* This check is needed at run-time also since it depends on
+ * run environment. It must not be optimized out.
+ */
+ if (WARN_ON(retval != 0))
+ retval = -EFAULT;
+ }
+ break;
+
+ case CMD_NOTIFY_RESTORE:
+ {
+ struct notify_cmd_args_restore src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_cmd_args_restore));
+ if (WARN_ON(retval != 0)) {
+ retval = -EFAULT;
+ goto func_end;
+ }
+ notify_restore(src_args.key, src_args.procId);
+ }
+ break;
+
+ case CMD_NOTIFY_DISABLEEVENT:
+ {
+ struct notify_cmd_args_disable_event src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_cmd_args_disable_event));
+
+ /* This check is needed at run-time also since it depends on
+ * run environment. It must not be optimized out.
+ */
+ if (WARN_ON(retval != 0)) {
+ retval = -EFAULT;
+ goto func_end;
+ }
+ notify_disable_event(src_args.handle, src_args.procId,
+ src_args.eventNo);
+ }
+ break;
+
+ case CMD_NOTIFY_ENABLEEVENT:
+ {
+ struct notify_cmd_args_enable_event src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct notify_cmd_args_enable_event));
+ if (WARN_ON(retval != 0)) {
+ retval = -EFAULT;
+ goto func_end;
+ }
+ notify_enable_event(src_args.notify_driver_handle,
+ src_args.procId, src_args.eventNo);
+ }
+ break;
+
+ case CMD_NOTIFY_ATTACH:
+ {
+		/* FIXME: Use copy_from_user */
+ u32 pid = *((u32 *)args);
+ status = notify_drv_attach(pid);
+
+ if (status < 0)
+ printk(KERN_ERR "NOTIFY_ATTACH FAILED\n");
+ }
+ break;
+
+ case CMD_NOTIFY_DETACH:
+ {
+		/* FIXME: Use copy_from_user */
+ u32 pid = *((u32 *)args);
+ status = notify_drv_detach(pid);
+
+ if (status < 0)
+ printk(KERN_ERR "NOTIFY_DETACH FAILED\n");
+ }
+ break;
+
+ default:
+ {
+		/* This does not impact the return status of this function,
+		 * so the retval comment is not used.
+		 */
+ status = NOTIFY_E_INVALIDARG;
+ printk(KERN_ERR "not valid command\n");
+ }
+ break;
+ }
+func_end:
+ /* Set the status and copy the common args to user-side. */
+ commonArgs.apiStatus = status;
+ status = copy_to_user((void *) cmdArgs,
+ (const void *) &commonArgs,
+ sizeof(struct notify_cmd_args));
+ if (status < 0)
+ retval = -EFAULT;
+ return retval;
+}
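+
+/*
+ * Illustrative sketch (not part of the driver): a minimal user-space call
+ * into the ioctl interface above, assuming the CMD_NOTIFY_* codes and the
+ * struct notify_cmd_args_send_event layout are available to user space via
+ * the syslink headers. The field values are placeholders.
+ *
+ *	struct notify_cmd_args_send_event args = {
+ *		.handle = drv_handle,
+ *		.procId = remote_proc_id,
+ *		.eventNo = my_event,
+ *		.payload = 0x1234,
+ *		.waitClear = false,
+ *	};
+ *	ioctl(fd, CMD_NOTIFY_SENDEVENT, &args);
+ */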
+
+/*====================
+ * notify_drv_add_buf_by_pid
+ *
+ */
+static int notify_drv_add_buf_by_pid(u16 proc_id, u32 pid,
+ u32 event_no, u32 data, notify_callback_fxn cbFxn, void *param)
+{
+ s32 status = 0;
+ bool flag = false;
+ bool is_exit = false;
+ struct notify_drv_event_packet *u_buf = NULL;
+ u32 i;
+
+ for (i = 0 ; (i < MAX_PROCESSES) && (flag != true) ; i++) {
+ if (notifydrv_state.event_state[i].pid == pid) {
+ flag = true ;
+ break ;
+ }
+ }
+ if (WARN_ON(flag == false)) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ u_buf = kzalloc(sizeof(struct notify_drv_event_packet), GFP_ATOMIC);
+
+ if (u_buf != NULL) {
+ INIT_LIST_HEAD((struct list_head *)&u_buf->element);
+ u_buf->proc_id = proc_id;
+ u_buf->data = data ;
+ u_buf->event_no = event_no ;
+ u_buf->func = cbFxn;
+ u_buf->param = param;
+ if (u_buf->event_no == (u32) -1) {
+ u_buf->is_exit = true;
+ is_exit = true;
+ }
+ if (mutex_lock_interruptible(notifydrv_state.gatehandle))
+ return NOTIFY_E_OSFAILURE;
+ list_add_tail((struct list_head *)&(u_buf->element),
+ (struct list_head *)
+ &(notifydrv_state.event_state[i].buf_list));
+ mutex_unlock(notifydrv_state.gatehandle);
+ up(notifydrv_state.event_state[i].semhandle);
+ /* Termination packet */
+ if (is_exit == true) {
+ if (down_interruptible(notifydrv_state.
+ event_state[i].tersemhandle))
+ status = NOTIFY_E_OSFAILURE;
+ }
+ }
+func_end:
+ return status;
+}
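+
+/*
+ * Note on the termination handshake implemented above: an event number of
+ * (u32)-1 marks an exit packet. notify_drv_detach() queues such a packet via
+ * notify_drv_add_buf_by_pid(), which then blocks on tersemhandle until
+ * notify_drv_read() delivers the packet to user space and posts the
+ * termination semaphore.
+ */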
+
+/*
+ * Module setup function.
+ *
+ */
+static void notify_drv_setup(void)
+{
+ int i;
+
+ INIT_LIST_HEAD((struct list_head *)&(notifydrv_state.evt_cbck_list));
+ notifydrv_state.gatehandle = kmalloc(sizeof(struct mutex),
+ GFP_KERNEL);
+ mutex_init(notifydrv_state.gatehandle);
+ for (i = 0; i < MAX_PROCESSES ; i++) {
+ notifydrv_state.event_state[i].pid = -1;
+ notifydrv_state.event_state[i].ref_count = 0;
+ INIT_LIST_HEAD((struct list_head *)
+ &(notifydrv_state.event_state[i].buf_list));
+ }
+ notifydrv_state.is_setup = true;
+}
+
+
+/*
+* brief Module destroy function.
+*/
+static void notify_drv_destroy(void)
+{
+ int i;
+ struct notify_drv_event_packet *packet;
+ struct list_head *entry;
+ struct notify_drv_event_cbck *cbck;
+
+ for (i = 0; i < MAX_PROCESSES ; i++) {
+ list_for_each(entry, (struct list_head *)
+ &(notifydrv_state.event_state[i].buf_list)) {
+ packet = (struct notify_drv_event_packet *)entry;
+ if (packet != NULL)
+ kfree(packet);
+ }
+ INIT_LIST_HEAD(&notifydrv_state.event_state[i].buf_list);
+ }
+ list_for_each(entry,
+ (struct list_head *)&(notifydrv_state.evt_cbck_list)) {
+ cbck = (struct notify_drv_event_cbck *)(entry);
+ if (cbck != NULL)
+ kfree(cbck) ;
+ }
+ INIT_LIST_HEAD(&notifydrv_state.evt_cbck_list);
+ mutex_destroy(notifydrv_state.gatehandle);
+ kfree(notifydrv_state.gatehandle);
+ notifydrv_state.is_setup = false;
+ return;
+}
+
+
+
+
+/*
+ * brief Attach a process to notify user support framework.
+ */
+static int notify_drv_attach(u32 pid)
+{
+ s32 status = NOTIFY_SUCCESS;
+ bool flag = false;
+ bool is_init = false;
+ u32 i;
+ struct semaphore *sem_handle;
+ struct semaphore *ter_sem_handle;
+ int ret_val = 0;
+
+ if (notifydrv_state.is_setup == false) {
+ status = NOTIFY_E_FAIL;
+ } else {
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gatehandle));
+
+ for (i = 0 ; (i < MAX_PROCESSES) ; i++) {
+ if (notifydrv_state.event_state[i].pid == pid) {
+ notifydrv_state.event_state[i].ref_count++;
+ is_init = true;
+ break;
+ }
+ }
+ mutex_unlock(notifydrv_state.gatehandle);
+
+ if (is_init == true)
+ goto func_end;
+
+ sem_handle = kmalloc(sizeof(struct semaphore), GFP_ATOMIC);
+ ter_sem_handle = kmalloc(sizeof(struct semaphore), GFP_ATOMIC);
+
+ sema_init(sem_handle, 0);
+ /* Create the termination semaphore */
+ sema_init(ter_sem_handle, 0);
+
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gatehandle));
+ /* Search for an available slot for user process. */
+ for (i = 0 ; i < MAX_PROCESSES ; i++) {
+ if (notifydrv_state.event_state[i].pid == -1) {
+ notifydrv_state.event_state[i].semhandle =
+ sem_handle;
+ notifydrv_state.event_state[i].tersemhandle =
+ ter_sem_handle;
+ notifydrv_state.event_state[i].pid = pid;
+ notifydrv_state.event_state[i].ref_count
+ = 1;
+ INIT_LIST_HEAD(&(notifydrv_state.event_state[i].
+ buf_list));
+ flag = true;
+ break;
+ }
+ }
+ mutex_unlock(notifydrv_state.gatehandle);
+
+ if (WARN_ON(flag != true)) {
+ /* Max users have registered. No more clients
+ * can be supported */
+ status = NOTIFY_E_MAXCLIENTS;
+ }
+
+ if (status == NOTIFY_SUCCESS)
+ ret_val = 0;
+ else {
+ kfree(ter_sem_handle);
+ kfree(sem_handle);
+ ret_val = -EINVAL;
+ }
+
+ }
+func_end:
+ return ret_val;
+}
+
+
+/*
+ * brief Detach a process from notify user support framework.
+ */
+static int notify_drv_detach(u32 pid)
+{
+ s32 status = NOTIFY_SUCCESS;
+ bool flag = false;
+ u32 i;
+ struct semaphore *sem_handle;
+ struct semaphore *ter_sem_handle;
+
+ if (notifydrv_state.is_setup == false) {
+ status = NOTIFY_E_FAIL;
+ goto func_end;
+ }
+
+ /* Send the termination packet to notify thread */
+ status = notify_drv_add_buf_by_pid(0, pid, (u32)-1, (u32)0,
+ NULL, NULL);
+
+ if (status < 0)
+ goto func_end;
+
+ if (mutex_lock_interruptible(notifydrv_state.gatehandle)) {
+ status = NOTIFY_E_OSFAILURE;
+ goto func_end;
+ }
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ if (notifydrv_state.event_state[i].pid == pid) {
+ if (notifydrv_state.event_state[i].ref_count == 1) {
+ /* Last client being unregistered for this
+ * process*/
+ notifydrv_state.event_state[i].pid = -1;
+ notifydrv_state.event_state[i].ref_count = 0;
+ sem_handle =
+ notifydrv_state.event_state[i].semhandle;
+ ter_sem_handle =
+ notifydrv_state.event_state[i].tersemhandle;
+ INIT_LIST_HEAD((struct list_head *)
+ &(notifydrv_state.event_state[i].buf_list));
+ notifydrv_state.event_state[i].semhandle =
+ NULL;
+ notifydrv_state.event_state[i].tersemhandle =
+ NULL;
+ flag = true;
+ break;
+ } else
+ notifydrv_state.event_state[i].ref_count--;
+ }
+ }
+ mutex_unlock(notifydrv_state.gatehandle);
+
+ if ((flag == false) && (i == MAX_PROCESSES)) {
+ /*retval NOTIFY_E_NOTFOUND The specified user process was
+ * not found registered with Notify Driver module. */
+ status = NOTIFY_E_NOTFOUND;
+ } else {
+ kfree(sem_handle);
+ kfree(ter_sem_handle);
+ }
+func_end:
+ return status;
+}
+
+
+/* Module initialization function for Notify driver.*/
+static int __init notify_drv_init_module(void)
+{
+ int result = 0 ;
+ dev_t dev;
+
+ if (driver_major) {
+ dev = MKDEV(driver_major, driver_minor);
+ result = register_chrdev_region(dev, 1, driver_name);
+ } else {
+ result = alloc_chrdev_region(&dev, driver_minor, 1,
+ driver_name);
+ driver_major = MAJOR(dev);
+ }
+
+ ipcnotify_device = kmalloc(sizeof(struct ipcnotify_dev), GFP_KERNEL);
+ if (!ipcnotify_device) {
+ result = -ENOMEM;
+ unregister_chrdev_region(dev, 1);
+ goto func_end;
+ }
+ memset(ipcnotify_device, 0, sizeof(struct ipcnotify_dev));
+ cdev_init(&ipcnotify_device->cdev, &driver_ops);
+ ipcnotify_device->cdev.owner = THIS_MODULE;
+ ipcnotify_device->cdev.ops = &driver_ops;
+
+ result = cdev_add(&ipcnotify_device->cdev, dev, 1);
+
+ if (result) {
+ printk(KERN_ERR "Failed to add the syslink ipcnotify device \n");
+ goto func_end;
+ }
+
+ /* udev support */
+ ipcnotify_class = class_create(THIS_MODULE, "syslink-ipcnotify");
+
+ if (IS_ERR(ipcnotify_class)) {
+ printk(KERN_ERR "Error creating ipcnotify class \n");
+ goto func_end;
+ }
+ device_create(ipcnotify_class, NULL, MKDEV(driver_major, driver_minor),
+ NULL, IPCNOTIFY_NAME);
+ result = notify_drv_register_driver();
+func_end:
+ return result ;
+}
+
+/* Module finalization function for Notify driver.*/
+static void __exit notify_drv_finalize_module(void)
+{
+ dev_t dev_no;
+
+ notify_drv_unregister_driver();
+
+ dev_no = MKDEV(driver_major, driver_minor);
+ if (ipcnotify_device) {
+ cdev_del(&ipcnotify_device->cdev);
+ kfree(ipcnotify_device);
+ }
+ unregister_chrdev_region(dev_no, 1);
+ if (ipcnotify_class) {
+ /* remove the device from sysfs */
+ device_destroy(ipcnotify_class, MKDEV(driver_major,
+ driver_minor));
+ class_destroy(ipcnotify_class);
+ }
+ return;
+}
+
+
+/*
+ *name module_init/module_exit
+ *
+ *desc Macro calls that indicate initialization and finalization functions
+ *to the kernel.
+ *
+ */
+module_init(notify_drv_init_module) ;
+module_exit(notify_drv_finalize_module) ;
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/dsp/syslink/omap_notify/notify.c b/drivers/dsp/syslink/omap_notify/notify.c
new file mode 100755
index 000000000000..389ee72d997b
--- /dev/null
+++ b/drivers/dsp/syslink/omap_notify/notify.c
@@ -0,0 +1,548 @@
+/*
+ * notify.c
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <asm/pgtable.h>
+
+#include <syslink/notify.h>
+#include <syslink/notify_driver.h>
+#include <syslink/GlobalTypes.h>
+#include <syslink/gt.h>
+#include <syslink/multiproc.h>
+#include <syslink/atomic_linux.h>
+
+/*
+ * func _notify_is_support_proc
+ *
+ * desc Check if the specified processor ID is supported by the Notify driver.
+ *
+ */
+static bool _notify_is_support_proc(struct notify_driver_object *drv_handle,
+ int proc_id);
+
+struct notify_module_object notify_state = {
+ .def_cfg.maxDrivers = 2,
+};
+EXPORT_SYMBOL(notify_state);
+
+
+/*
+ * Get the default configuration for the Notify module.
+ *
+ * This function can be called by the application to have its
+ * configuration parameter for Notify_setup filled in by the
+ * Notify module with the default parameters. If the user
+ * does not wish to make any change to the default parameters, this
+ * API is not required to be called.
+ *
+ * param cfg: Pointer to the Notify module configuration
+ * structure in which the default config is to be returned.
+ */
+void notify_get_config(struct notify_config *cfg)
+{
+ BUG_ON(cfg == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true)
+ memcpy(cfg, &notify_state.def_cfg,
+ sizeof(struct notify_config));
+ else
+ memcpy(cfg, &notify_state.cfg, sizeof(struct notify_config));
+}
+EXPORT_SYMBOL(notify_get_config);
+
+/*
+ * Setup the Notify module.
+ *
+ * This function sets up the Notify module. This function
+ * must be called before any other instance-level APIs can be
+ * invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then Notify_getConfig can be called to get the
+ * configuration filled with the default values. After this, only
+ * the required configuration values can be changed. If the user
+ * does not wish to make any change in the default parameters, the
+ * application can simply call Notify_setup with NULL
+ * parameters. The default parameters would get automatically used.
+ *
+ * param -cfg Optional Notify module configuration. If provided as
+ * NULL, default configuration is used.
+ */
+int notify_setup(struct notify_config *cfg)
+{
+ int status = NOTIFY_SUCCESS;
+ struct notify_config tmpCfg;
+
+ atomic_cmpmask_and_set(&notify_state.ref_count,
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&notify_state.ref_count)
+ != NOTIFY_MAKE_MAGICSTAMP(1u)) {
+ status = NOTIFY_S_ALREADYSETUP;
+ } else {
+ if (cfg == NULL) {
+ notify_get_config(&tmpCfg);
+ cfg = &tmpCfg;
+ }
+
+ notify_state.gate_handle = kmalloc(sizeof(struct mutex),
+ GFP_ATOMIC);
+ /*User has not provided any gate handle,
+ so create a default handle.*/
+ mutex_init(notify_state.gate_handle);
+
+ if (WARN_ON(cfg->maxDrivers > NOTIFY_MAX_DRIVERS)) {
+ status = NOTIFY_E_CONFIG;
+ kfree(notify_state.gate_handle);
+ atomic_set(&notify_state.ref_count,
+ NOTIFY_MAKE_MAGICSTAMP(0));
+ goto func_end;
+ }
+ memcpy(&notify_state.cfg, cfg, sizeof(struct notify_config));
+ memset(&notify_state.drivers, 0,
+ (sizeof(struct notify_driver_object) *
+ NOTIFY_MAX_DRIVERS));
+
+ notify_state.disable_depth = 0;
+
+ }
+func_end:
+ return status;
+}
+EXPORT_SYMBOL(notify_setup);
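+
+/*
+ * Illustrative sketch (not part of the module): a typical kernel-side setup
+ * sequence using the two APIs above. The maxDrivers value shown is only an
+ * example.
+ *
+ *	struct notify_config cfg;
+ *
+ *	notify_get_config(&cfg);
+ *	cfg.maxDrivers = 2;
+ *	if (notify_setup(&cfg) < 0)
+ *		return -EIO;
+ *	...
+ *	notify_destroy();
+ */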
+
+/*
+ * Destroy the Notify module.
+ *
+ * Once this function is called, other Notify module APIs,
+ * except for the Notify_getConfig API cannot be called
+ * anymore.
+ */
+int notify_destroy(void)
+{
+ int i;
+ int status = NOTIFY_SUCCESS;
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ status = NOTIFY_E_INVALIDSTATE;
+ } else {
+ if (atomic_dec_return(&(notify_state.ref_count)) ==
+ NOTIFY_MAKE_MAGICSTAMP(0)) {
+
+ /* Check if any Notify driver instances have
+ * not been deleted so far. If not, assert.
+ */
+ for (i = 0; i < NOTIFY_MAX_DRIVERS; i++)
+ WARN_ON(notify_state.drivers[i].is_init
+ != false);
+
+ if (notify_state.gate_handle != NULL)
+ kfree(notify_state.gate_handle);
+
+ atomic_set(&notify_state.ref_count,
+ NOTIFY_MAKE_MAGICSTAMP(0));
+ }
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_destroy);
+
+/*
+ * func notify_register_event
+ *
+ * desc This function registers a callback for a specific event with the
+ * Notify module.
+ */
+int notify_register_event(void *notify_driver_handle, u16 proc_id,
+ u32 event_no, notify_callback_fxn notify_callback_fxn, void *cbck_arg)
+{
+ int status = NOTIFY_SUCCESS;
+
+ struct notify_driver_object *drv_handle =
+ (struct notify_driver_object *)notify_driver_handle;
+
+ BUG_ON(drv_handle == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto func_end;
+ }
+ if (WARN_ON(drv_handle->is_init == false)) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto func_end;
+ }
+ if (WARN_ON(_notify_is_support_proc(drv_handle, proc_id) != true)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto func_end;
+ }
+ if (WARN_ON(((event_no & NOTIFY_EVENT_MASK)
+ >= drv_handle->attrs.proc_info[proc_id].max_events))) {
+ status = NOTIFY_E_INVALIDEVENT;
+ goto func_end;
+ }
+ if (WARN_ON(((event_no & NOTIFY_EVENT_MASK) <
+ drv_handle->attrs.proc_info[proc_id].reserved_events) &&
+ ((event_no & NOTIFY_SYSTEM_KEY_MASK) >> sizeof(u16)) !=
+ NOTIFY_SYSTEM_KEY)) {
+ status = NOTIFY_E_RESERVEDEVENT;
+ goto func_end;
+ }
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+
+ status = drv_handle->fn_table.register_event(drv_handle, proc_id,
+ (event_no & NOTIFY_EVENT_MASK), notify_callback_fxn,
+ cbck_arg);
+ mutex_unlock(notify_state.gate_handle);
+ if (WARN_ON(status < 0))
+ status = NOTIFY_E_FAIL;
+ else
+ status = NOTIFY_SUCCESS;
+func_end:
+ return status;
+}
+EXPORT_SYMBOL(notify_register_event);
+
+/*
+ * func notify_unregister_event
+ *
+ * desc This function un-registers the callback for the specific event with
+ * the Notify module.
+ */
+
+int notify_unregister_event(void *notify_driver_handle, u16 proc_id,
+ u32 event_no, notify_callback_fxn notify_callback_fxn, void *cbck_arg)
+{
+ int status = NOTIFY_SUCCESS;
+ struct notify_driver_object *drv_handle =
+ (struct notify_driver_object *)notify_driver_handle;
+ BUG_ON(drv_handle == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto func_end;
+ }
+ if (WARN_ON(drv_handle->is_init == false)) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto func_end;
+ }
+ if (WARN_ON(_notify_is_support_proc(drv_handle, proc_id) != true)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto func_end;
+ }
+ if (WARN_ON(((event_no & NOTIFY_EVENT_MASK)
+ >= drv_handle->attrs.proc_info[proc_id].max_events))) {
+ status = NOTIFY_E_INVALIDEVENT;
+ goto func_end;
+ }
+ if (WARN_ON(((event_no & NOTIFY_EVENT_MASK) <
+ drv_handle->attrs.proc_info[proc_id].reserved_events) &&
+ ((event_no & NOTIFY_SYSTEM_KEY_MASK) >> sizeof(u16)) !=
+ NOTIFY_SYSTEM_KEY)) {
+ status = NOTIFY_E_RESERVEDEVENT;
+ goto func_end;
+ }
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ status = drv_handle->fn_table.unregister_event(drv_handle,
+ proc_id, (event_no & NOTIFY_EVENT_MASK),
+ notify_callback_fxn, cbck_arg);
+ mutex_unlock(notify_state.gate_handle);
+ if (WARN_ON(status < 0))
+ status = NOTIFY_E_FAIL;
+ else
+ status = NOTIFY_SUCCESS;
+
+func_end:
+ return status;
+}
+EXPORT_SYMBOL(notify_unregister_event);
+
+/*
+ * func notify_sendevent
+ *
+ * desc This function sends a notification to the specified event.
+ *
+ *
+ */
+int notify_sendevent(void *notify_driver_handle, u16 proc_id,
+ u32 event_no, u32 payload, bool wait_clear)
+{
+ int status = NOTIFY_SUCCESS;
+ struct notify_driver_object *drv_handle =
+ (struct notify_driver_object *)notify_driver_handle;
+ BUG_ON(drv_handle == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto func_end;
+ }
+ if (WARN_ON(drv_handle->is_init == false)) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto func_end;
+ }
+ if (WARN_ON(_notify_is_support_proc(drv_handle, proc_id) != true)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto func_end;
+ }
+ if (WARN_ON(((event_no & NOTIFY_EVENT_MASK)
+ >= drv_handle->attrs.proc_info[proc_id].max_events))) {
+ status = NOTIFY_E_INVALIDEVENT;
+ goto func_end;
+ }
+ if (WARN_ON(((event_no & NOTIFY_EVENT_MASK) <
+ drv_handle->attrs.proc_info[proc_id].reserved_events) &&
+ ((event_no & NOTIFY_SYSTEM_KEY_MASK) >> sizeof(u16)) !=
+ NOTIFY_SYSTEM_KEY)) {
+ status = NOTIFY_E_RESERVEDEVENT;
+ goto func_end;
+ }
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ status = drv_handle->fn_table.send_event
+ (drv_handle, proc_id, (event_no & NOTIFY_EVENT_MASK),
+ payload, wait_clear);
+ mutex_unlock(notify_state.gate_handle);
+ if (status < 0)
+ status = NOTIFY_E_FAIL;
+ else
+ status = NOTIFY_SUCCESS;
+func_end:
+ return status;
+}
+EXPORT_SYMBOL(notify_sendevent);
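+
+/*
+ * Illustrative sketch (not part of the module): registering a callback for
+ * an event and sending a notification with the APIs above. The callback
+ * signature matches the way notify_callback_fxn is invoked by this module;
+ * the names and payload value are placeholders.
+ *
+ *	static void my_cbck(u16 proc_id, u32 event_no, void *arg, u32 payload)
+ *	{
+ *		... handle the notification ...
+ *	}
+ *
+ *	notify_register_event(drv_handle, remote_proc, EVENT, my_cbck, NULL);
+ *	notify_sendevent(drv_handle, remote_proc, EVENT, 0xbeef, false);
+ *	...
+ *	notify_unregister_event(drv_handle, remote_proc, EVENT, my_cbck, NULL);
+ */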
+
+/*
+ * func notify_disable
+ *
+ * desc This function disables all events. This is equivalent to global
+ * interrupt disable, however restricted within interrupts handled by
+ * the Notify module. All callbacks registered for all events are
+ * disabled with this API. It is not possible to disable a specific
+ * callback.
+ *
+ */
+u32 notify_disable(u16 proc_id)
+{
+ struct notify_driver_object *drv_handle;
+ int i;
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true)
+ WARN_ON(1);
+
+
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+
+ for (i = 0; i < notify_state.cfg.maxDrivers; i++) {
+ drv_handle = &(notify_state.drivers[i]);
+ WARN_ON(_notify_is_support_proc(drv_handle, proc_id)
+ == false);
+ if (drv_handle->is_init ==
+ NOTIFY_DRIVERINITSTATUS_NOTDONE) {
+ if (drv_handle->fn_table.disable) {
+ drv_handle->disable_flag[notify_state.
+ disable_depth] =
+ (u32 *)drv_handle->fn_table.
+ disable(drv_handle,
+ proc_id);
+ }
+ }
+ }
+ notify_state.disable_depth++;
+ mutex_unlock(notify_state.gate_handle);
+
+ return notify_state.disable_depth;
+}
+EXPORT_SYMBOL(notify_disable);
+
+/*
+ * notify_restore
+ *
+ * desc This function restores the Notify module to the state before the
+ * last notify_disable() was called. This is equivalent to global
+ * interrupt restore, however restricted within interrupts handled by
+ * the Notify module. All callbacks registered for all events as
+ * specified in the flags are enabled with this API. It is not possible
+ * to enable a specific callback.
+ *
+ *
+ */
+void notify_restore(u32 key, u16 proc_id)
+{
+ struct notify_driver_object *drv_handle;
+ int i;
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true)
+ WARN_ON(1);
+
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ notify_state.disable_depth--;
+ for (i = 0; i < notify_state.cfg.maxDrivers; i++) {
+ drv_handle = &(notify_state.drivers[i]);
+ if (drv_handle->fn_table.restore)
+ drv_handle->fn_table.restore(drv_handle,
+ key, proc_id);
+ }
+ mutex_unlock(notify_state.gate_handle);
+ return;
+}
+EXPORT_SYMBOL(notify_restore);
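+
+/*
+ * Illustrative sketch (not part of the module): the disable/restore pair
+ * above behaves like a save/restore of interrupt state. The value returned
+ * by notify_disable() is passed back as the key to notify_restore().
+ *
+ *	u32 key;
+ *
+ *	key = notify_disable(remote_proc);
+ *	... critical region with Notify events masked ...
+ *	notify_restore(key, remote_proc);
+ */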
+
+/*
+ *func notify_disable_event
+ *
+ * desc This function disables a specific event. All callbacks registered
+ * for the specific event are disabled with this API. It is not
+ * possible to disable a specific callback.
+ *
+ */
+
+void notify_disable_event(void *notify_driver_handle, u16 proc_id, u32 event_no)
+{
+ int status = 0;
+ struct notify_driver_object *drv_handle =
+ (struct notify_driver_object *)notify_driver_handle;
+ BUG_ON(drv_handle == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto func_end;
+ }
+
+ if (WARN_ON(drv_handle->is_init == false)) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto func_end;
+ }
+ if (WARN_ON(_notify_is_support_proc(drv_handle, proc_id) != true)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto func_end;
+ }
+ if (WARN_ON(((event_no & NOTIFY_EVENT_MASK)
+ >= drv_handle->attrs.proc_info[proc_id].max_events))) {
+ status = NOTIFY_E_INVALIDEVENT;
+ goto func_end;
+ }
+ if (WARN_ON(((event_no & NOTIFY_EVENT_MASK) <
+ drv_handle->attrs.proc_info[proc_id].reserved_events) &&
+ ((event_no & NOTIFY_SYSTEM_KEY_MASK) >> sizeof(u16)) !=
+ NOTIFY_SYSTEM_KEY)) {
+ status = NOTIFY_E_RESERVEDEVENT;
+ goto func_end;
+ }
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ drv_handle->fn_table.disable_event(drv_handle,
+ proc_id, (event_no & NOTIFY_EVENT_MASK));
+ mutex_unlock(notify_state.gate_handle);
+func_end:
+ return;
+}
+EXPORT_SYMBOL(notify_disable_event);
+
+/*
+ * notify_enable_event
+ *
+ * This function enables a specific event. All callbacks registered for
+ * this specific event are enabled with this API. It is not possible to
+ * enable a specific callback.
+ *
+ */
+void notify_enable_event(void *notify_driver_handle, u16 proc_id, u32 event_no)
+{
+
+ struct notify_driver_object *drv_handle =
+ (struct notify_driver_object *) notify_driver_handle;
+
+ BUG_ON(drv_handle == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ goto func_end;
+ }
+
+ if (WARN_ON(drv_handle->is_init == false))
+ goto func_end;
+ if (WARN_ON(_notify_is_support_proc(drv_handle, proc_id) != true))
+ goto func_end;
+ if (WARN_ON(((event_no & NOTIFY_EVENT_MASK)
+ >= drv_handle->attrs.proc_info[proc_id].max_events)))
+ goto func_end;
+ if (WARN_ON(((event_no & NOTIFY_EVENT_MASK) <
+ drv_handle->attrs.proc_info[proc_id].reserved_events) &&
+ ((event_no & NOTIFY_SYSTEM_KEY_MASK) >> sizeof(u16)) !=
+ NOTIFY_SYSTEM_KEY))
+ goto func_end;
+
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ if (drv_handle->fn_table.enable_event) {
+ drv_handle->fn_table.enable_event(drv_handle,
+ proc_id, (event_no & NOTIFY_EVENT_MASK));
+ }
+ mutex_unlock(notify_state.gate_handle);
+func_end:
+ return;
+}
+EXPORT_SYMBOL(notify_enable_event);
+
+/*
+ * _notify_is_support_proc
+ *
+ * Check if the specified processor ID is supported by the Notify driver.
+ *
+ */
+static bool _notify_is_support_proc(struct notify_driver_object *drv_handle,
+ int proc_id)
+{
+ bool found = false;
+ struct notify_driver_attrs *attrs;
+ int i;
+
+ BUG_ON(drv_handle == NULL);
+ attrs = &(drv_handle->attrs);
+ for (i = 0; i < MULTIPROC_MAXPROCESSORS; i++) {
+ if (attrs->proc_info[i].proc_id == proc_id) {
+ found = true;
+ break;
+ }
+ }
+ return found;
+}
diff --git a/drivers/dsp/syslink/omap_notify/notify_driver.c b/drivers/dsp/syslink/omap_notify/notify_driver.c
new file mode 100755
index 000000000000..c90c57335918
--- /dev/null
+++ b/drivers/dsp/syslink/omap_notify/notify_driver.c
@@ -0,0 +1,172 @@
+/*
+ * notify_driver.c
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+
+#include <linux/io.h>
+#include <asm/pgtable.h>
+#include <syslink/gt.h>
+
+#include <syslink/notify.h>
+#include <syslink/notify_driver.h>
+#include <syslink/atomic_linux.h>
+
+/*
+ *func notify_register_driver
+ *
+ *desc This function registers a Notify driver with the Notify module.
+ */
+
+int notify_register_driver(char *driver_name,
+ struct notify_interface *fn_table,
+ struct notify_driver_attrs *drv_attrs,
+ struct notify_driver_object **driver_handle)
+{
+ int status = NOTIFY_SUCCESS;
+ struct notify_driver_object *drv_handle = NULL;
+ int i;
+
+ BUG_ON(driver_name == NULL);
+ BUG_ON(fn_table == NULL);
+ BUG_ON(drv_attrs == NULL);
+ BUG_ON(driver_handle == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto func_end;
+ }
+
+ /*Initialize to status that indicates that an empty slot was not
+ *found for the driver.
+ */
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ status = NOTIFY_E_MAXDRIVERS;
+ for (i = 0; i < notify_state.cfg.maxDrivers; i++) {
+ drv_handle = &(notify_state.drivers[i]);
+
+ if (drv_handle->is_init == NOTIFY_DRIVERINITSTATUS_DONE) {
+ if (strncmp(driver_name, drv_handle->name,
+ NOTIFY_MAX_NAMELEN) == 0) {
+ status = NOTIFY_E_ALREADYEXISTS;
+ goto return_existing_handle;
+ }
+ }
+ if (drv_handle->is_init == NOTIFY_DRIVERINITSTATUS_NOTDONE) {
+ /* Found an empty slot, so block it. */
+ drv_handle->is_init =
+ NOTIFY_DRIVERINITSTATUS_INPROGRESS;
+ status = NOTIFY_SUCCESS;
+ break;
+ }
+ }
+ mutex_unlock(notify_state.gate_handle);
+ WARN_ON(status < 0);
+ /*Complete registration of the driver. */
+ strncpy(drv_handle->name,
+ driver_name, NOTIFY_MAX_NAMELEN);
+ memcpy(&(drv_handle->attrs), drv_attrs,
+ sizeof(struct notify_driver_attrs));
+ memcpy(&(drv_handle->fn_table), fn_table,
+ sizeof(struct notify_interface));
+ drv_handle->driver_object = NULL;
+
+return_existing_handle:
+ /*is_setup is set when driverInit is called. */
+ *driver_handle = drv_handle;
+
+func_end:
+ return status;
+}
+EXPORT_SYMBOL(notify_register_driver);
+
+/*========================================
+ *func notify_unregister_driver
+ *
+ *desc This function un-registers a Notify driver with the Notify module.
+ */
+int notify_unregister_driver(struct notify_driver_object *drv_handle)
+{
+ int status = NOTIFY_SUCCESS;
+
+ BUG_ON(drv_handle == NULL);
+
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ status = NOTIFY_E_INVALIDSTATE;
+ } else {
+ /* Unregister the driver. */
+ drv_handle->is_init = NOTIFY_DRIVERINITSTATUS_NOTDONE;
+
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_unregister_driver);
+
+
+/*==================================
+ * Function to find and return the driver handle maintained within
+ * the Notify module.
+ *
+ * driver_name Name of the driver to be searched.
+ * handle Return parameter: Handle to the driver.
+ *
+ */
+int notify_get_driver_handle(char *driver_name,
+ struct notify_driver_object **handle)
+{
+ int status = NOTIFY_E_NOTFOUND;
+ struct notify_driver_object *drv_handle;
+ int i;
+
+ BUG_ON(handle == NULL);
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true) {
+ status = NOTIFY_E_INVALIDSTATE;
+ } else if (WARN_ON(driver_name == NULL))
+ status = NOTIFY_E_INVALIDARG;
+ else {
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+
+ for (i = 0; i < notify_state.cfg.maxDrivers; i++) {
+ drv_handle = &(notify_state.drivers[i]);
+ /* Check whether the driver handle slot is occupied. */
+ if (drv_handle->is_init == true) {
+ if (strncmp(driver_name, drv_handle->name,
+ NOTIFY_MAX_NAMELEN) == 0) {
+ status = NOTIFY_SUCCESS;
+ *handle = drv_handle;
+ break;
+ }
+ }
+ }
+ mutex_unlock(notify_state.gate_handle);
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_get_driver_handle);
diff --git a/drivers/dsp/syslink/procmgr/Kbuild b/drivers/dsp/syslink/procmgr/Kbuild
new file mode 100755
index 000000000000..58f6d3155250
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/Kbuild
@@ -0,0 +1,10 @@
+libomap_syslink_proc = processor.o procmgr.o procmgr_drv.o
+
+obj-$(CONFIG_SYSLINK_PROC) += syslink_proc.o
+syslink_proc-objs = $(libomap_syslink_proc)
+
+ccflags-y += -Wno-strict-prototypes
+
+#Header files
+ccflags-y += -Iarch/arm/plat-omap/include/syslink
+
diff --git a/drivers/dsp/syslink/procmgr/proc4430/Kbuild b/drivers/dsp/syslink/procmgr/proc4430/Kbuild
new file mode 100755
index 000000000000..f82f2e23f79a
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/Kbuild
@@ -0,0 +1,10 @@
+libomap_proc4430 = proc4430.o proc4430_drv.o dmm4430.o \
+ ducatienabler.o hw_mmu.o
+
+obj-$(CONFIG_SYSLINK_PROC) += syslink_proc4430.o
+syslink_proc4430-objs = $(libomap_proc4430)
+
+ccflags-y += -Wno-strict-prototypes -DUSE_LEVEL_1_MACROS
+
+#Header files
+ccflags-y += -Iarch/arm/plat-omap/include/syslink
diff --git a/drivers/dsp/syslink/procmgr/proc4430/dmm4430.c b/drivers/dsp/syslink/procmgr/proc4430/dmm4430.c
new file mode 100755
index 000000000000..d2ed9ece2245
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/dmm4430.c
@@ -0,0 +1,355 @@
+/*
+ * dmm4430.c
+ *
+ * Syslink support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * ======== dmm.c ========
+ * Purpose:
+ *	The Dynamic Memory Manager (DMM) module manages the DSP virtual address
+ *	space that can be directly mapped to any MPU buffer or memory region.
+ *
+ * Public Functions:
+ *	dmm_create_tables
+ *	dmm_create
+ *	dmm_destroy
+ *	dmm_delete_tables
+ *	dmm_init
+ *	dmm_reserve_memory
+ *	dmm_unreserve_memory
+ *
+ * Private Functions:
+ *	get_free_region
+ *	get_mapped_region
+ *
+ * Notes:
+ *	Region: Generic memory entity having a start address and a size
+ *	Chunk: Reserved region
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <asm/page.h>
+#include "dmm4430.h"
+
+
+#define DMM_ADDR_VIRTUAL(x, a) \
+ do { \
+ x = (((struct map_page *)(a) - p_virt_mapping_tbl) * PAGE_SIZE \
+ + dyn_mem_map_begin);\
+ } while (0)
+
+#define DMM_ADDR_TO_INDEX(i, a) \
+ do { \
+ i = (((a) - dyn_mem_map_begin) / PAGE_SIZE); \
+ } while (0)
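+
+/*
+ * Worked example (illustrative only): with a hypothetical dyn_mem_map_begin
+ * of 0x60000000 and a 4 KB PAGE_SIZE, table entry i maps to the DSP virtual
+ * address dyn_mem_map_begin + i * PAGE_SIZE, so entry 3 corresponds to
+ * 0x60003000; DMM_ADDR_TO_INDEX() inverts this, mapping 0x60003000 back to
+ * index 3.
+ */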
+
+struct map_page {
+ u32 region_size:31;
+ u32 b_reserved:1;
+};
+
+/* Create the free list */
+static struct map_page *p_virt_mapping_tbl;
+static u32 i_freeregion; /* The index of free region */
+static u32 i_freesize;
+static u32 table_size;/* The size of virtual and physical pages tables */
+static u32 dyn_mem_map_begin;
+struct mutex *dmm_lock;
+
+static struct map_page *get_free_region(u32 size);
+static struct map_page *get_mapped_region(u32 addr);
+
+/* ======== dmm_create_tables ========
+ * Purpose:
+ *	Create a table to hold information about the virtual memory that is
+ *	reserved for the DSP.
+ */
+int dmm_create_tables(u32 addr, u32 size)
+{
+ int status = 0;
+
+ dmm_delete_tables();
+ if (WARN_ON(mutex_lock_interruptible(dmm_lock)) < 0) {
+ status = -EFAULT;
+ goto func_exit;
+ }
+ dyn_mem_map_begin = addr;
+ table_size = (size/PAGE_SIZE) + 1;
+ /* Create the free list */
+ p_virt_mapping_tbl = (struct map_page *)vmalloc(
+ table_size*sizeof(struct map_page));
+ if (WARN_ON(p_virt_mapping_tbl == NULL))
+ status = -ENOMEM;
+ /* On successful allocation,
+ * all entries are zero ('free') */
+ i_freeregion = 0;
+ i_freesize = table_size*PAGE_SIZE;
+ p_virt_mapping_tbl[0].region_size = table_size;
+ mutex_unlock(dmm_lock);
+
+func_exit:
+ return status;
+}
+
+/*
+ * ======== dmm_create ========
+ * Purpose:
+ *	Create a dynamic memory manager object.
+ */
+int dmm_create(void)
+{
+ int status = 0;
+ dmm_lock = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (WARN_ON(dmm_lock == NULL)) {
+ status = -EFAULT;
+ goto func_exit;
+ }
+ mutex_init(dmm_lock);
+func_exit:
+ return status;
+}
+
+/*
+ * ======== dmm_destroy ========
+ * Purpose:
+ *	Release the dynamic memory manager resources.
+ */
+void dmm_destroy(void)
+{
+ dmm_delete_tables();
+ kfree(dmm_lock);
+}
+
+
+/*
+ * ======== dmm_delete_tables ========
+ * Purpose:
+ *Delete DMM Tables.
+ */
+void dmm_delete_tables(void)
+{
+ /* Delete all DMM tables */
+ WARN_ON(mutex_lock_interruptible(dmm_lock));
+ if (p_virt_mapping_tbl != NULL) {
+ vfree(p_virt_mapping_tbl);
+ p_virt_mapping_tbl = NULL;
+ }
+ mutex_unlock(dmm_lock);
+}
+
+/*
+ * ======== dmm_init ========
+ * Purpose:
+ *Initializes private state of DMM module.
+ */
+void dmm_init(void)
+{
+ p_virt_mapping_tbl = NULL ;
+ table_size = 0;
+ return;
+}
+
+/*
+ * ======== dmm_reserve_memory ========
+ * Purpose:
+ *Reserve a chunk of virtually contiguous DSP/IVA address space.
+ */
+int dmm_reserve_memory(u32 size, u32 *p_rsv_addr)
+{
+ int status = 0;
+ struct map_page *node;
+ u32 rsv_addr = 0;
+ u32 rsv_size = 0;
+
+ if (WARN_ON(mutex_lock_interruptible(dmm_lock)) < 0) {
+ status = -EFAULT;
+ goto func_exit;
+ }
+
+ /* Try to get a DSP chunk from the free list */
+ node = get_free_region(size);
+ if (node != NULL) {
+ /* DSP chunk of given size is available. */
+ DMM_ADDR_VIRTUAL(rsv_addr, node);
+ /* Calculate the number entries to use */
+ rsv_size = size/PAGE_SIZE;
+ if (rsv_size < node->region_size) {
+ /* Mark remainder of free region */
+ node[rsv_size].b_reserved = false;
+ node[rsv_size].region_size =
+ node->region_size - rsv_size;
+ }
+		/* get_free_region() returns a first-fit chunk, but we only
+		 * use what is requested. */
+ node->b_reserved = true;
+ node->region_size = rsv_size;
+ /* Return the chunk's starting address */
+ *p_rsv_addr = rsv_addr;
+ } else
+		/* DSP chunk of the given size is not available */
+ status = -ENOMEM;
+
+ mutex_unlock(dmm_lock);
+func_exit:
+ return status;
+}
+
+
+/*
+ * ======== dmm_unreserve_memory ========
+ * Purpose:
+ *Free a chunk of reserved DSP/IVA address space.
+ */
+int dmm_unreserve_memory(u32 rsv_addr, u32 *psize)
+{
+ struct map_page *chunk;
+ int status = 0;
+
+ WARN_ON(mutex_lock_interruptible(dmm_lock));
+
+ /* Find the chunk containing the reserved address */
+ chunk = get_mapped_region(rsv_addr);
+ if (chunk == NULL)
+ status = -ENXIO;
+ WARN_ON(status < 0);
+ if (status == 0) {
+ chunk->b_reserved = false;
+ *psize = chunk->region_size * PAGE_SIZE;
+ /* NOTE: We do NOT coalesce free regions here.
+ * Free regions are coalesced in get_region(), as it traverses
+ *the whole mapping table
+ */
+ }
+ mutex_unlock(dmm_lock);
+ return status;
+}
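+
+/*
+ * Illustrative sketch (not part of the module): expected life cycle of the
+ * DMM APIs above. The base address and size constants are placeholders.
+ *
+ *	u32 rsv_addr, rsv_size;
+ *
+ *	dmm_init();
+ *	dmm_create();
+ *	dmm_create_tables(DUCATI_VA_BASE, DUCATI_VA_SIZE);
+ *	dmm_reserve_memory(SZ_1M, &rsv_addr);
+ *	...
+ *	dmm_unreserve_memory(rsv_addr, &rsv_size);
+ *	dmm_destroy();
+ */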
+
+/*
+ * ======== get_free_region ========
+ * Purpose:
+ * Returns the requested free region
+ */
+static struct map_page *get_free_region(u32 size)
+{
+ struct map_page *curr_region = NULL;
+ u32 i = 0;
+ u32 region_size = 0;
+ u32 next_i = 0;
+
+ if (p_virt_mapping_tbl == NULL)
+ return curr_region;
+ if (size > i_freesize) {
+ /* Find the largest free region
+ * (coalesce during the traversal) */
+ while (i < table_size) {
+ region_size = p_virt_mapping_tbl[i].region_size;
+ next_i = i+region_size;
+ if (p_virt_mapping_tbl[i].b_reserved == false) {
+ /* Coalesce, if possible */
+ if (next_i < table_size &&
+ p_virt_mapping_tbl[next_i].b_reserved
+ == false) {
+ p_virt_mapping_tbl[i].region_size +=
+ p_virt_mapping_tbl[next_i].region_size;
+ continue;
+ }
+ region_size *= PAGE_SIZE;
+ if (region_size > i_freesize) {
+ i_freeregion = i;
+ i_freesize = region_size;
+ }
+ }
+ i = next_i;
+ }
+ }
+ if (size <= i_freesize) {
+ curr_region = p_virt_mapping_tbl + i_freeregion;
+ i_freeregion += (size / PAGE_SIZE);
+ i_freesize -= size;
+ }
+ return curr_region;
+}
+
+/*
+ * ======== get_mapped_region ========
+ * Purpose:
+ * Returns the requested mapped region
+ */
+static struct map_page *get_mapped_region(u32 addr)
+{
+ u32 i = 0;
+ struct map_page *curr_region = NULL;
+
+ if (p_virt_mapping_tbl == NULL)
+ return curr_region;
+
+ DMM_ADDR_TO_INDEX(i, addr);
+ if (i < table_size && (p_virt_mapping_tbl[i].b_reserved))
+ curr_region = p_virt_mapping_tbl + i;
+ return curr_region;
+}
+
+#ifdef DSP_DMM_DEBUG
+int dmm_mem_map_dump(void)
+{
+ struct map_page *curNode = NULL;
+ u32 i;
+ u32 freemem = 0;
+ u32 bigsize = 0;
+
+ WARN_ON(mutex_lock_interruptible(dmm_lock));
+
+ if (p_virt_mapping_tbl != NULL) {
+ for (i = 0; i < table_size; i +=
+ p_virt_mapping_tbl[i].region_size) {
+ curNode = p_virt_mapping_tbl + i;
+ if (curNode->b_reserved == true) {
+ /*printk("RESERVED size = 0x%x, "
+ "Map size = 0x%x\n",
+ (curNode->region_size * PAGE_SIZE),
+ (curNode->b_mapped == false) ? 0 :
+ (curNode->mapped_size * PAGE_SIZE));
+*/
+ } else {
+/* printk("UNRESERVED size = 0x%x\n",
+ (curNode->region_size * PAGE_SIZE));
+*/
+ freemem += (curNode->region_size * PAGE_SIZE);
+ if (curNode->region_size > bigsize)
+ bigsize = curNode->region_size;
+ }
+ }
+ }
+ printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
+ freemem/(1024*1024));
+ printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
+ (((table_size * PAGE_SIZE)-freemem))/(1024*1024));
+ printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
+ (bigsize*PAGE_SIZE/(1024*1024)));
+ mutex_unlock(dmm_lock);
+
+ return 0;
+}
+#endif
diff --git a/drivers/dsp/syslink/procmgr/proc4430/dmm4430.h b/drivers/dsp/syslink/procmgr/proc4430/dmm4430.h
new file mode 100755
index 000000000000..7d879f4fe123
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/dmm4430.h
@@ -0,0 +1,50 @@
+/*
+ * dmm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+/*
+ * ======== dmm.h ========
+ * Purpose:
+ *	The Dynamic Memory Mapping (DMM) module manages the DSP virtual address
+ *	space that can be directly mapped to any MPU buffer or memory region.
+ *
+ * Public Functions:
+ *
+ */
+
+#ifndef DMM_4430_
+#define DMM_4430_
+
+#include <linux/types.h>
+
+int dmm_reserve_memory(u32 size, u32 *p_rsv_addr);
+
+int dmm_unreserve_memory(u32 rsv_addr, u32 *psize);
+
+void dmm_destroy(void);
+
+void dmm_delete_tables(void);
+
+int dmm_create(void);
+
+void dmm_init(void);
+
+int dmm_create_tables(u32 addr, u32 size);
+
+#ifdef DSP_DMM_DEBUG
+int dmm_mem_map_dump(void);
+#endif
+#endif /* DMM_4430_ */
diff --git a/drivers/dsp/syslink/procmgr/proc4430/ducatienabler.c b/drivers/dsp/syslink/procmgr/proc4430/ducatienabler.c
new file mode 100644
index 000000000000..581888639cdf
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/ducatienabler.c
@@ -0,0 +1,869 @@
+/*
+ * ducatienabler.c
+ *
+ * Syslink driver support for TI OMAP processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <asm/page.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+
+
+#include <generated/autoconf.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+#include <linux/syscalls.h>
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/irqs.h>
+
+#include <syslink/ducatienabler.h>
+#include <syslink/MMUAccInt.h>
+
+#include <plat/iommu.h>
+#include "../../../arch/arm/plat-omap/iopgtable.h"
+
+
+#ifdef DEBUG_DUCATI_IPC
+#define DPRINTK(fmt, args...) printk(KERN_INFO "%s: " fmt, __func__, ## args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+
+#define base_ducati_l2_mmuPhys 0x55082000
+
+/*
+ * Physical memory address of the Ducati base image. The 74 MB
+ * region is preallocated at build time (via make menuconfig).
+ *
+ */
+/* #define DUCATI_BASEIMAGE_PHYSICAL_ADDRESS 0x87200000 */
+#define DUCATI_BASEIMAGE_PHYSICAL_ADDRESS 0x9CF00000
+
+
+/* Attributes of L2 page tables for DSP MMU.*/
+struct page_info {
+ /* Number of valid PTEs in the L2 PT*/
+ u32 num_entries;
+};
+
+enum pagetype {
+ SECTION = 0,
+ LARGE_PAGE = 1,
+ SMALL_PAGE = 2,
+ SUPER_SECTION = 3
+};
+
+
+static u32 shm_phys_addr;
+static u32 shm_virt_addr;
+
+struct iommu *ducati_iommu_ptr;
+
+static void bad_page_dump(u32 pa, struct page *pg)
+{
+ pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
+ pr_emerg("Bad page state in process '%s'\n", current->comm);
+ BUG();
+}
+
+/*============================================
+ * Builds an IOMMU TLB entry for the given PA/VA pair and page size and
+ * stores it in the Ducati IOMMU page table via iopgtable_store_entry()
+ */
+static int pte_set(u32 pa, u32 va, u32 size, struct hw_mmu_map_attrs_t *attrs)
+{
+ struct iotlb_entry tlb_entry;
+ switch (size) {
+ case HW_PAGE_SIZE_16MB:
+ tlb_entry.pgsz = MMU_CAM_PGSZ_16M;
+ break;
+ case HW_PAGE_SIZE_1MB:
+ tlb_entry.pgsz = MMU_CAM_PGSZ_1M;
+ break;
+ case HW_PAGE_SIZE_64KB:
+ tlb_entry.pgsz = MMU_CAM_PGSZ_64K;
+ break;
+ case HW_PAGE_SIZE_4KB:
+ tlb_entry.pgsz = MMU_CAM_PGSZ_4K;
+ break;
+ }
+ tlb_entry.prsvd = MMU_CAM_P;
+ tlb_entry.valid = MMU_CAM_V;
+ switch (attrs->element_size) {
+ case HW_ELEM_SIZE_8BIT:
+ tlb_entry.elsz = MMU_RAM_ELSZ_8;
+ break;
+ case HW_ELEM_SIZE_16BIT:
+ tlb_entry.elsz = MMU_RAM_ELSZ_16;
+ break;
+ case HW_ELEM_SIZE_32BIT:
+ tlb_entry.elsz = MMU_RAM_ELSZ_32;
+ break;
+ case HW_ELEM_SIZE_64BIT:
+ tlb_entry.elsz = 0x3; /* No translation */
+ break;
+ }
+ switch (attrs->endianism) {
+ case HW_LITTLE_ENDIAN:
+ tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
+ break;
+ case HW_BIG_ENDIAN:
+ tlb_entry.endian = MMU_RAM_ENDIAN_BIG;
+ break;
+ }
+ switch (attrs->mixedSize) {
+ case HW_MMU_TLBES:
+ tlb_entry.mixed = 0;
+ break;
+ case HW_MMU_CPUES:
+ tlb_entry.mixed = MMU_RAM_MIXED;
+ break;
+ }
+ tlb_entry.da = va;
+ tlb_entry.pa = pa;
+ DPRINTK("pte_set: iommu %p, da 0x%x, pa 0x%x\n",
+ ducati_iommu_ptr, tlb_entry.da, tlb_entry.pa);
+ if (iopgtable_store_entry(ducati_iommu_ptr, &tlb_entry))
+ goto error_exit;
+ return 0;
+error_exit:
+ printk(KERN_ERR "pte set failure \n");
+ return -EFAULT;
+}
+
+
+/*=============================================
+ * This function calculates the optimum page-aligned addresses and sizes
+ * Caller must pass page-aligned values
+ */
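+/*
+ * Illustrative example (hypothetical values): for pa = 0x9D000000,
+ * va = 0xA0000000 and size = 0x1100000, both addresses are 16MB aligned
+ * but only 0x1000000 bytes fit a 16MB entry, so the loop below emits one
+ * 16MB entry followed by one 1MB entry (0x1000000 + 0x100000 = 0x1100000).
+ */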
+static int pte_update(u32 pa, u32 va, u32 size,
+ struct hw_mmu_map_attrs_t *map_attrs)
+{
+ u32 i;
+ u32 all_bits;
+ u32 pa_curr = pa;
+ u32 va_curr = va;
+ u32 num_bytes = size;
+ int status = 0;
+ u32 pg_size[] = {HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB,
+ HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB};
+ DPRINTK("> pte_update pa %x, va %x, "
+ "size %x, map_attrs %x\n", pa, va, size, (u32)map_attrs);
+ while (num_bytes && (status == 0)) {
+ /* To find the max. page size with which both PA & VA are
+ * aligned */
+ all_bits = pa_curr | va_curr;
+ DPRINTK("all_bits %x, pa_curr %x, va_curr %x, "
+ "num_bytes %x\n ",
+ all_bits, pa_curr, va_curr, num_bytes);
+
+ for (i = 0; i < 4; i++) {
+ if ((num_bytes >= pg_size[i]) && ((all_bits &
+ (pg_size[i] - 1)) == 0)) {
+ DPRINTK("pg_size %x\n", pg_size[i]);
+ status = pte_set(pa_curr,
+ va_curr, pg_size[i], map_attrs);
+ pa_curr += pg_size[i];
+ va_curr += pg_size[i];
+ num_bytes -= pg_size[i];
+ /* Don't try smaller sizes. Hopefully we have
+ * reached an address aligned to a bigger page
+ * size */
+ break;
+ }
+ }
+ }
+ DPRINTK("< pte_update status %x num_bytes %x\n", status, num_bytes);
+ return status;
+}
+
+/*
+ * ======== ducati_mem_unmap ========
+ * Invalidate the PTEs for the DSP VA block to be unmapped.
+ *
+ * PTEs of a mapped memory block are contiguous in any page table
+ * So, instead of looking up the PTE address for every 4K block,
+ * we clear consecutive PTEs until we unmap all the bytes
+ */
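+/*
+ * Note: in the loop below nent counts the constituent 4KB pages of the
+ * entry being removed (1 for a 4KB page, 16 for a 64KB large page, 256
+ * for a 1MB section, 4096 for a 16MB supersection); the page reference
+ * taken at map time is dropped for each of them before the entry is
+ * cleared via iopgtable_clear_entry().
+ */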
+int ducati_mem_unmap(u32 da, u32 num_bytes)
+{
+ u32 bytes;
+ struct page *pg = NULL;
+ int temp = 0;
+ u32 nent;
+ u32 phyaddress;
+ s32 numofBytes = num_bytes;
+
+ while (num_bytes > 0) {
+ u32 *iopgd = iopgd_offset(ducati_iommu_ptr, da);
+ if (*iopgd & IOPGD_TABLE) {
+ u32 *iopte = iopte_offset(iopgd, da);
+ if (*iopte & IOPTE_LARGE) {
+ nent = 16;
+ /* rewind to the 1st entry */
+ iopte = (u32 *)((u32)iopte & IOLARGE_MASK);
+ } else
+ nent = 1;
+ phyaddress = (*iopte) & IOPAGE_MASK;
+ } else {
+ if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
+ nent = 4096;
+ /* rewind to the 1st entry */
+ iopgd = (u32 *)((u32)iopgd & IOSUPER_MASK);
+ } else
+ nent = 256;
+ phyaddress = (*iopgd) & IOPGD_MASK;
+ }
+ for (temp = 0; temp < nent; temp++) {
+ if (pfn_valid(__phys_to_pfn(phyaddress))) {
+ pg = phys_to_page(phyaddress);
+ if (page_count(pg) < 1) {
+ pr_info("DSPBRIDGE:UNMAP function: "
+ "COUNT 0 FOR PA 0x%x,"
+ " size = 0x%x\n",
+ phyaddress, numofBytes);
+ bad_page_dump(phyaddress, pg);
+ }
+ SetPageDirty(pg);
+ page_cache_release(pg);
+ }
+ phyaddress += HW_PAGE_SIZE_4KB;
+ }
+ bytes = iopgtable_clear_entry(ducati_iommu_ptr, da);
+ num_bytes -= bytes;
+ da += bytes;
+ }
+ return 0;
+}
+
+
+/*
+ * ======== ducati_mem_virtToPhys ========
+ * This function translates a remote (Ducati) virtual address
+ * to a physical address
+ */
+
+inline u32 ducati_mem_virtToPhys(u32 da)
+{
+#if 0
+ /* FIXME: temp work around till L2MMU issue
+ * is resolved
+ */
+ u32 *iopgd = iopgd_offset(ducati_iommu_ptr, da);
+ u32 phyaddress;
+
+ if (*iopgd & IOPGD_TABLE) {
+ u32 *iopte = iopte_offset(iopgd, da);
+ if (*iopte & IOPTE_LARGE) {
+ phyaddress = *iopte & IOLARGE_MASK;
+ phyaddress |= (da & (IOLARGE_SIZE - 1));
+ } else
+ phyaddress = (*iopte) & IOPAGE_MASK;
+ } else {
+ if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
+ phyaddress = *iopgd & IOSUPER_MASK;
+ phyaddress |= (da & (IOSUPER_SIZE - 1));
+ } else {
+ phyaddress = (*iopgd) & IOPGD_MASK;
+ phyaddress |= (da & (IOPGD_SIZE - 1));
+ }
+ }
+#endif
+ return da;
+}
+
+
+/*
+ * ======== user_va2pa ========
+ * Purpose:
+ * This function walks through the Linux page tables to convert a userland
+ * virtual address to physical address
+ */
+u32 user_va2pa(struct mm_struct *mm, u32 address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pgd = pgd_offset(mm, address);
+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+ pmd = pmd_offset(pgd, address);
+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+ ptep = pte_offset_map(pmd, address);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return pte & PAGE_MASK;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*============================================
+ * This function maps an MPU buffer into the DSP address space. It performs
+ * linear-to-physical address translation if required, translating each
+ * page since linear addresses can be physically non-contiguous.
+ * All address and size arguments are assumed to be page aligned (in proc.c).
+ *
+ */
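+/*
+ * Illustrative call (hypothetical addresses): mapping 1MB of physically
+ * contiguous memory with 32-bit element size goes straight to pte_update():
+ *
+ *	ducati_mem_map(0x9D000000, 0xA0100000, 0x100000,
+ *			DSP_MAPPHYSICALADDR | DSP_MAPELEMSIZE32);
+ *
+ * Without DSP_MAPPHYSICALADDR the first argument is treated as an MPU
+ * virtual address and each 4KB page is resolved and pinned individually.
+ */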
+int ducati_mem_map(u32 mpu_addr, u32 ul_virt_addr,
+ u32 num_bytes, u32 map_attr)
+{
+ u32 attrs;
+ int status = 0;
+ struct hw_mmu_map_attrs_t hw_attrs;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ struct task_struct *curr_task = current;
+ u32 write = 0;
+ u32 da = ul_virt_addr;
+ u32 pa = 0;
+ int pg_i = 0;
+ int pg_num = 0;
+ struct page *mappedPage, *pg;
+ int num_usr_pages = 0;
+
+ DPRINTK("> WMD_BRD_MemMap pa %x, va %x, "
+ "size %x, map_attr %x\n", mpu_addr, ul_virt_addr,
+ num_bytes, map_attr);
+ if (num_bytes == 0)
+ return -EINVAL;
+ if (map_attr != 0) {
+ attrs = map_attr;
+ } else {
+ /* Assign default attributes */
+ attrs = DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE32;
+ }
+ /* Take mapping properties */
+ if (attrs & DSP_MAPBIGENDIAN)
+ hw_attrs.endianism = HW_BIG_ENDIAN;
+ else
+ hw_attrs.endianism = HW_LITTLE_ENDIAN;
+
+ hw_attrs.mixedSize = (enum hw_mmu_mixed_size_t)
+ ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
+ /* Ignore element_size if mixedSize is enabled */
+ if (hw_attrs.mixedSize == 0) {
+ if (attrs & DSP_MAPELEMSIZE8) {
+ /* Size is 8 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE_8BIT;
+ } else if (attrs & DSP_MAPELEMSIZE16) {
+ /* Size is 16 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE_16BIT;
+ } else if (attrs & DSP_MAPELEMSIZE32) {
+ /* Size is 32 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE_32BIT;
+ } else if (attrs & DSP_MAPELEMSIZE64) {
+ /* Size is 64 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE_64BIT;
+ } else {
+ /* Mixedsize isn't enabled, so size can't be
+ * zero here */
+ DPRINTK("WMD_BRD_MemMap: MMU element size is zero\n");
+ return -EINVAL;
+ }
+ }
+ /*
+ * Do OS-specific user-va to pa translation.
+ * Combine physically contiguous regions to reduce TLBs.
+ * Pass the translated pa to PteUpdate.
+ */
+ if ((attrs & DSP_MAPPHYSICALADDR)) {
+ status = pte_update(mpu_addr, ul_virt_addr, num_bytes,
+ &hw_attrs);
+ goto func_cont;
+ }
+ /*
+ * Important Note: mpu_addr is mapped from user application process
+ * to current process - it must lie completely within the current
+ * virtual memory address space in order to be of use to us here!
+ */
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, mpu_addr);
+ /*
+ * It is observed that under some circumstances, the user buffer is
+ * spread across several VMAs. So loop through and check if the entire
+ * user buffer is covered
+ */
+ while ((vma) && (mpu_addr + num_bytes > vma->vm_end)) {
+ /* jump to the next VMA region */
+ vma = find_vma(mm, vma->vm_end + 1);
+ }
+ if (!vma) {
+ status = -EINVAL;
+ up_read(&mm->mmap_sem);
+ goto func_cont;
+ }
+ if (vma->vm_flags & VM_IO) {
+ num_usr_pages = num_bytes / PAGE_SIZE;
+ /* Get the physical addresses for user buffer */
+ for (pg_i = 0; pg_i < num_usr_pages; pg_i++) {
+ pa = user_va2pa(mm, mpu_addr);
+ if (!pa) {
+ status = -EFAULT;
+ pr_err("DSPBRIDGE: VM_IO mapping physical "
+ "address is invalid\n");
+ break;
+ }
+ if (pfn_valid(__phys_to_pfn(pa))) {
+ pg = phys_to_page(pa);
+ get_page(pg);
+ if (page_count(pg) < 1) {
+ pr_err("Bad page in VM_IO buffer\n");
+ bad_page_dump(pa, pg);
+ }
+ }
+ status = pte_set(pa, da, HW_PAGE_SIZE_4KB, &hw_attrs);
+ if (WARN_ON(status < 0))
+ break;
+ mpu_addr += HW_PAGE_SIZE_4KB;
+ da += HW_PAGE_SIZE_4KB;
+ }
+ } else {
+ num_usr_pages = num_bytes / PAGE_SIZE;
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ write = 1;
+
+ for (pg_i = 0; pg_i < num_usr_pages; pg_i++) {
+ pg_num = get_user_pages(curr_task, mm, mpu_addr, 1,
+ write, 1, &mappedPage, NULL);
+ if (pg_num > 0) {
+ if (page_count(mappedPage) < 1) {
+ pr_err("Bad page count after doing "
+ "get_user_pages on "
+ "user buffer\n");
+ bad_page_dump(page_to_phys(mappedPage),
+ mappedPage);
+ }
+ status = pte_set(page_to_phys(mappedPage), da,
+ HW_PAGE_SIZE_4KB, &hw_attrs);
+ if (WARN_ON(status < 0))
+ break;
+ da += HW_PAGE_SIZE_4KB;
+ mpu_addr += HW_PAGE_SIZE_4KB;
+ } else {
+ pr_err("DSPBRIDGE: get_user_pages FAILED, "
+ "MPU addr = 0x%x, "
+ "vma->vm_flags = 0x%lx, "
+ "get_user_pages Err "
+ "Value = %d, Buffer "
+ "size=0x%x\n", mpu_addr,
+ vma->vm_flags, pg_num,
+ num_bytes);
+ status = -EFAULT;
+ break;
+ }
+ }
+ }
+ up_read(&mm->mmap_sem);
+func_cont:
+ /* Don't propagate Linux or HW status to upper layers */
+ if (status < 0) {
+ /*
+ * Roll back the mapped pages in case mapping failed midway
+ */
+ if (pg_i)
+ ducati_mem_unmap(ul_virt_addr, (pg_i * PAGE_SIZE));
+ }
+ WARN_ON(status < 0);
+ DPRINTK("< WMD_BRD_MemMap status %x\n", status);
+ return status;
+
+}
+
+ /*=========================================
+ * Decides a TLB entry size
+ *
+ */
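+/*
+ * Example (hypothetical values): pa = 0x9CF10000 with size = 0x30000 is
+ * 64KB but not 1MB aligned, so LARGE_PAGE with an entry_size of 64KB is
+ * returned; the caller maps that much and calls again for the remainder.
+ */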
+static int get_mmu_entry_size(u32 pa, u32 size, enum pagetype *size_tlb,
+ u32 *entry_size)
+{
+ int status = 0;
+ bool page_align_4kb = false;
+ bool page_align_64kb = false;
+ bool page_align_1mb = false;
+ bool page_align_16mb = false;
+ u32 phys_addr = pa;
+
+ /* First check the page alignment*/
+ if ((phys_addr % PAGE_SIZE_4KB) == 0)
+ page_align_4kb = true;
+ if ((phys_addr % PAGE_SIZE_64KB) == 0)
+ page_align_64kb = true;
+ if ((phys_addr % PAGE_SIZE_1MB) == 0)
+ page_align_1mb = true;
+ if ((phys_addr % PAGE_SIZE_16MB) == 0)
+ page_align_16mb = true;
+
+ if ((!page_align_64kb) && (!page_align_1mb) && (!page_align_4kb)) {
+ status = -EINVAL;
+ goto error_exit;
+ }
+ /* Now decide the entry size */
+ if (size >= PAGE_SIZE_16MB) {
+ if (page_align_16mb) {
+ *size_tlb = SUPER_SECTION;
+ *entry_size = PAGE_SIZE_16MB;
+ } else if (page_align_1mb) {
+ *size_tlb = SECTION;
+ *entry_size = PAGE_SIZE_1MB;
+ } else if (page_align_64kb) {
+ *size_tlb = LARGE_PAGE;
+ *entry_size = PAGE_SIZE_64KB;
+ } else if (page_align_4kb) {
+ *size_tlb = SMALL_PAGE;
+ *entry_size = PAGE_SIZE_4KB;
+ } else {
+ status = -EINVAL;
+ goto error_exit;
+ }
+ } else if (size >= PAGE_SIZE_1MB && size < PAGE_SIZE_16MB) {
+ if (page_align_1mb) {
+ *size_tlb = SECTION;
+ *entry_size = PAGE_SIZE_1MB;
+ } else if (page_align_64kb) {
+ *size_tlb = LARGE_PAGE;
+ *entry_size = PAGE_SIZE_64KB;
+ } else if (page_align_4kb) {
+ *size_tlb = SMALL_PAGE;
+ *entry_size = PAGE_SIZE_4KB;
+ } else {
+ status = -EINVAL;
+ goto error_exit;
+ }
+ } else if (size > PAGE_SIZE_4KB &&
+ size < PAGE_SIZE_1MB) {
+ if (page_align_64kb) {
+ *size_tlb = LARGE_PAGE;
+ *entry_size = PAGE_SIZE_64KB;
+ } else if (page_align_4kb) {
+ *size_tlb = SMALL_PAGE;
+ *entry_size = PAGE_SIZE_4KB;
+ } else {
+ status = -EINVAL;
+ goto error_exit;
+ }
+ } else if (size == PAGE_SIZE_4KB) {
+ if (page_align_4kb) {
+ *size_tlb = SMALL_PAGE;
+ *entry_size = PAGE_SIZE_4KB;
+ } else {
+ status = -EINVAL;
+ goto error_exit;
+ }
+ } else {
+ status = -EINVAL;
+ goto error_exit;
+ }
+
+ DPRINTK("< GetMMUEntrySize status %x\n", status);
+ return 0;
+error_exit:
+ DPRINTK("< GetMMUEntrySize FAILED !!!!!!\n");
+ return status;
+}
+
+/*=========================================
+ * Add DSP MMU entries corresponding to given MPU-Physical address
+ * and DSP-virtual address
+ */
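+/*
+ * Note: unlike pte_set(), which writes the IOMMU page table, this path
+ * loads preserved (locked) entries directly into the Ducati MMU TLB via
+ * load_iotlb_entry(), so these mappings are not evicted by TLB
+ * replacement.
+ */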
+static int add_dsp_mmu_entry(u32 *phys_addr, u32 *dsp_addr,
+ u32 size)
+{
+ u32 mapped_size = 0;
+ enum pagetype size_tlb = SECTION;
+ u32 entry_size = 0;
+ int status = 0;
+ struct iotlb_entry tlb_entry;
+ int retval = 0;
+
+
+ DPRINTK("Entered add_dsp_mmu_entry phys_addr = "
+ "0x%x, dsp_addr = 0x%x,size = 0x%x\n",
+ *phys_addr, *dsp_addr, size);
+
+ while ((mapped_size < size) && (status == 0)) {
+ status = get_mmu_entry_size(*phys_addr,
+ (size - mapped_size), &size_tlb, &entry_size);
+ if (status < 0)
+ goto error_exit;
+
+ if (size_tlb == SUPER_SECTION)
+ tlb_entry.pgsz = MMU_CAM_PGSZ_16M;
+
+ else if (size_tlb == SECTION)
+ tlb_entry.pgsz = MMU_CAM_PGSZ_1M;
+
+ else if (size_tlb == LARGE_PAGE)
+ tlb_entry.pgsz = MMU_CAM_PGSZ_64K;
+
+ else if (size_tlb == SMALL_PAGE)
+ tlb_entry.pgsz = MMU_CAM_PGSZ_4K;
+
+ tlb_entry.elsz = MMU_RAM_ELSZ_16;
+ tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
+ tlb_entry.mixed = MMU_RAM_MIXED;
+ tlb_entry.prsvd = MMU_CAM_P;
+ tlb_entry.valid = MMU_CAM_V;
+
+ tlb_entry.da = *dsp_addr;
+ tlb_entry.pa = *phys_addr;
+ DPRINTK("add_dsp_mmu_entry: iommu %p, da 0x%x, pa 0x%x\n",
+ ducati_iommu_ptr, tlb_entry.da, tlb_entry.pa);
+ retval = load_iotlb_entry(ducati_iommu_ptr, &tlb_entry);
+ if (retval < 0)
+ goto error_exit;
+ mapped_size += entry_size;
+ *phys_addr += entry_size;
+ *dsp_addr += entry_size;
+ }
+
+ return 0;
+error_exit:
+ printk(KERN_ERR "pte set failure retval = 0x%x, status = 0x%x \n",
+ retval, status);
+ return retval;
+}
+
+
+/*=============================================
+ * Add DSP MMU entries corresponding to given MPU-Physical address
+ * and DSP-virtual address
+ *
+ */
+#if 0
+static int add_entry_ext(u32 *phys_addr, u32 *dsp_addr,
+ u32 size)
+{
+ u32 mapped_size = 0;
+ enum pagetype size_tlb = SECTION;
+ u32 entry_size = 0;
+ int status = 0;
+ u32 page_size = HW_PAGE_SIZE_1MB;
+ u32 flags = 0;
+
+ flags = (DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN |
+ DSP_MAPPHYSICALADDR);
+ while ((mapped_size < size) && (status == 0)) {
+
+ /* get_mmu_entry_size fills the size_tlb and entry_size
+ based on alignment and size of memory to map
+ to DSP - size */
+ status = get_mmu_entry_size(*phys_addr,
+ (size - mapped_size),
+ &size_tlb,
+ &entry_size);
+
+ if (size_tlb == SUPER_SECTION)
+ page_size = HW_PAGE_SIZE_16MB;
+ else if (size_tlb == SECTION)
+ page_size = HW_PAGE_SIZE_1MB;
+ else if (size_tlb == LARGE_PAGE)
+ page_size = HW_PAGE_SIZE_64KB;
+ else if (size_tlb == SMALL_PAGE)
+ page_size = HW_PAGE_SIZE_4KB;
+
+ if (status == 0) {
+
+ ducati_mem_map(*phys_addr,
+ *dsp_addr, page_size, flags);
+ mapped_size += entry_size;
+ *phys_addr += entry_size;
+ *dsp_addr += entry_size;
+ }
+ }
+ return status;
+}
+#endif
+
+void ducati_tlb_dump(void)
+{
+#if defined CONFIG_OMAP_IOMMU_DEBUG_MODULE
+ char *p;
+
+ p = kmalloc(1000, GFP_KERNEL);
+ if (!p)
+ return;
+ dump_tlb_entries(ducati_iommu_ptr, p, 1000);
+ printk(KERN_INFO "%8s %8s %2s\n", "cam:", "ram:", "preserved");
+ printk(KERN_INFO "-----------------------------------------\n");
+ printk(KERN_INFO "%s", p);
+ kfree(p);
+#endif
+ return;
+}
+
+/*================================
+ * Initialize the Ducati MMU.
+ */
+int ducati_mmu_init(u32 a_phy_addr)
+{
+ int ret_val = 0;
+ u32 phys_addr = 0;
+ u32 num_l4_entries;
+ u32 i = 0;
+ u32 num_l3_mem_entries = 0;
+ u32 virt_addr = 0;
+
+ num_l4_entries = (sizeof(l4_map) / sizeof(struct mmu_entry));
+ num_l3_mem_entries = sizeof(l3_memory_regions) /
+ sizeof(struct memory_entry);
+
+ DPRINTK("\n Programming Ducati MMU using linear address \n");
+
+ phys_addr = a_phy_addr;
+
+ printk(KERN_ALERT " Programming Ducati memory regions\n");
+ printk(KERN_ALERT "=========================================\n");
+ for (i = 0; i < num_l3_mem_entries; i++) {
+
+ printk(KERN_ALERT "VA = [0x%x] of size [0x%x] at PA = [0x%x]\n",
+ l3_memory_regions[i].ul_virt_addr,
+ l3_memory_regions[i].ul_size, phys_addr);
+
+ /* OMAP4430 SDC code */
+ /* Adjust below logic if using cacheable shared memory */
+ if (l3_memory_regions[i].ul_virt_addr == \
+ DUCATI_MEM_IPC_HEAP0_ADDR) {
+ shm_phys_addr = phys_addr;
+ }
+ virt_addr = l3_memory_regions[i].ul_virt_addr;
+ ret_val = add_dsp_mmu_entry(&phys_addr, &virt_addr,
+ (l3_memory_regions[i].ul_size));
+
+ if (WARN_ON(ret_val < 0))
+ goto error_exit;
+ }
+
+ printk(KERN_ALERT " Programming Ducati L4 peripherals\n");
+ printk(KERN_ALERT "=========================================\n");
+ for (i = 0; i < num_l4_entries; i++) {
+ printk(KERN_INFO "PA [0x%x] VA [0x%x] size [0x%x]\n",
+ l4_map[i].ul_phy_addr, l4_map[i].ul_virt_addr,
+ l4_map[i].ul_size);
+ virt_addr = l4_map[i].ul_virt_addr;
+ phys_addr = l4_map[i].ul_phy_addr;
+ ret_val = add_dsp_mmu_entry(&phys_addr,
+ &virt_addr, (l4_map[i].ul_size));
+ if (WARN_ON(ret_val < 0)) {
+
+ DPRINTK("**** Failed to map Peripheral ****");
+ DPRINTK("Phys addr [0x%x] Virt addr [0x%x] size [0x%x]",
+ l4_map[i].ul_phy_addr, l4_map[i].ul_virt_addr,
+ l4_map[i].ul_size);
+ DPRINTK(" Status [0x%x]", ret_val);
+ goto error_exit;
+ }
+ }
+ ducati_tlb_dump();
+ return 0;
+error_exit:
+ return ret_val;
+}
+
+
+/*========================================
+ * This sets up the Ducati processor
+ *
+ */
+int ducati_setup(void)
+{
+ int ret_val = 0;
+
+ ducati_iommu_ptr = iommu_get("ducati");
+ if (IS_ERR(ducati_iommu_ptr)) {
+ pr_err("Error iommu_get\n");
+ return -EFAULT;
+ }
+ ret_val = ducati_mmu_init(DUCATI_BASEIMAGE_PHYSICAL_ADDRESS);
+ if (WARN_ON(ret_val < 0))
+ goto error_exit;
+ return 0;
+error_exit:
+ WARN_ON(1);
+ printk(KERN_ERR "DUCATI SETUP FAILED !!!!!\n");
+ return ret_val;
+}
+EXPORT_SYMBOL(ducati_setup);
+
+/*============================================
+ * De-Initialize the Ducati MMU and free the
+ * memory allocation for L1 and L2 pages
+ *
+ */
+void ducati_destroy(void)
+{
+ iommu_put(ducati_iommu_ptr);
+ return;
+}
+EXPORT_SYMBOL(ducati_destroy);
+
+/*============================================
+ * Returns the ducati virtual address for IPC shared memory
+ *
+ */
+u32 get_ducati_virt_mem(void)
+{
+ /*shm_virt_addr = (u32)ioremap(shm_phys_addr, DUCATI_SHARED_IPC_LEN);*/
+ shm_virt_addr = (u32)ioremap(shm_phys_addr, DUCATI_MEM_IPC_SHMEM_LEN);
+ return shm_virt_addr;
+}
+EXPORT_SYMBOL(get_ducati_virt_mem);
+
+/*============================================
+ * Unmaps the ducati virtual address for IPC shared memory
+ *
+ */
+void unmap_ducati_virt_mem(u32 shm_virt_addr)
+{
+ iounmap((void __iomem *) shm_virt_addr);
+ return;
+}
+EXPORT_SYMBOL(unmap_ducati_virt_mem);
diff --git a/drivers/dsp/syslink/procmgr/proc4430/hw_mmu.c b/drivers/dsp/syslink/procmgr/proc4430/hw_mmu.c
new file mode 100755
index 000000000000..ba0547456ab3
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/hw_mmu.c
@@ -0,0 +1,661 @@
+/*
+ * hw_mmu.c
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <syslink/GlobalTypes.h>
+#include <syslink/MMURegAcM.h>
+#include <syslink/hw_defs.h>
+#include <syslink/hw_mmu.h>
+
+#define MMU_BASE_VAL_MASK 0xFC00
+#define MMU_PAGE_MAX 3
+#define MMU_ELEMENTSIZE_MAX 3
+#define MMU_ADDR_MASK 0xFFFFF000
+#define MMU_TTB_MASK 0xFFFFC000
+#define MMU_SECTION_ADDR_MASK 0xFFF00000
+#define MMU_SSECTION_ADDR_MASK 0xFF000000
+#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
+#define MMU_LARGE_PAGE_MASK 0xFFFF0000
+#define MMU_SMALL_PAGE_MASK 0xFFFFF000
+
+#define MMU_LOAD_TLB 0x00000001
+#define NUM_TLB_ENTRIES 32
+
+
+
+/*
+ * type: hw_mmu_pgsiz_t
+ *
+ * desc: Enumerated type used to specify the MMU page size (SLSS)
+ */
+enum hw_mmu_pgsiz_t {
+ HW_MMU_SECTION,
+ HW_MMU_LARGE_PAGE,
+ HW_MMU_SMALL_PAGE,
+ HW_MMU_SUPERSECTION
+
+};
+
+/*
+* function : mmu_flsh_entry
+*/
+
+static hw_status mmu_flsh_entry(const u32 base_address);
+
+ /*
+* function : mme_set_cam_entry
+*
+*/
+
+static hw_status mme_set_cam_entry(const u32 base_address,
+ const u32 page_size,
+ const u32 preserve_bit,
+ const u32 valid_bit,
+ const u32 virt_addr_tag);
+
+/*
+* function : mmu_set_ram_entry
+*/
+static hw_status mmu_set_ram_entry(const u32 base_address,
+ const u32 physical_addr,
+ enum hw_endianism_t endianism,
+ enum hw_elemnt_siz_t element_size,
+ enum hw_mmu_mixed_size_t mixedSize);
+
+/*
+* hw functions
+*
+*/
+
+hw_status hw_mmu_enable(const u32 base_address)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_CNTLMMUEnableWrite32(base_address, HW_SET);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_enable);
+
+hw_status hw_mmu_disable(const u32 base_address)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_CNTLMMUEnableWrite32(base_address, HW_CLEAR);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_disable);
+
+hw_status hw_mmu_autoidle_en(const u32 base_address)
+{
+ hw_status status;
+
+ status = mmu_sisconf_auto_idle_set32(base_address, HW_SET);
+ status = RET_OK;
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_autoidle_en);
+
+hw_status hw_mmu_nulck_set(const u32 base_address, u32 *num_lcked_entries)
+{
+ hw_status status = RET_OK;
+
+ *num_lcked_entries = MMUMMU_LOCKBaseValueRead32(base_address);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_nulck_set);
+
+
+hw_status hw_mmu_numlocked_set(const u32 base_address, u32 num_lcked_entries)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_LOCKBaseValueWrite32(base_address, num_lcked_entries);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_numlocked_set);
+
+
+hw_status hw_mmu_vctm_numget(const u32 base_address, u32 *vctm_entry_num)
+{
+ hw_status status = RET_OK;
+
+ *vctm_entry_num = MMUMMU_LOCKCurrentVictimRead32(base_address);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_vctm_numget);
+
+
+hw_status hw_mmu_victim_numset(const u32 base_address, u32 vctm_entry_num)
+{
+ hw_status status = RET_OK;
+
+ mmu_lck_crnt_vctmwite32(base_address, vctm_entry_num);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_victim_numset);
+
+hw_status hw_mmu_tlb_flushAll(const u32 base_address)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_GFLUSHGlobalFlushWrite32(base_address, HW_SET);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_tlb_flushAll);
+
+hw_status hw_mmu_eventack(const u32 base_address, u32 irq_mask)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_IRQSTATUSWriteRegister32(base_address, irq_mask);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_eventack);
+
+hw_status hw_mmu_event_disable(const u32 base_address, u32 irq_mask)
+{
+ hw_status status = RET_OK;
+ u32 irqReg;
+ irqReg = MMUMMU_IRQENABLEReadRegister32(base_address);
+
+ MMUMMU_IRQENABLEWriteRegister32(base_address, irqReg & ~irq_mask);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_event_disable);
+
+hw_status hw_mmu_event_enable(const u32 base_address, u32 irq_mask)
+{
+ hw_status status = RET_OK;
+ u32 irqReg;
+
+ irqReg = MMUMMU_IRQENABLEReadRegister32(base_address);
+
+ MMUMMU_IRQENABLEWriteRegister32(base_address, irqReg | irq_mask);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_event_enable);
+
+hw_status hw_mmu_event_status(const u32 base_address, u32 *irq_mask)
+{
+ hw_status status = RET_OK;
+
+ *irq_mask = MMUMMU_IRQSTATUSReadRegister32(base_address);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_event_status);
+
+hw_status hw_mmu_flt_adr_rd(const u32 base_address, u32 *addr)
+{
+ hw_status status = RET_OK;
+
+ /*Check the input Parameters*/
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+ /* read values from register */
+ *addr = MMUMMU_FAULT_ADReadRegister32(base_address);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_flt_adr_rd);
+
+
+hw_status hw_mmu_ttbset(const u32 base_address, u32 ttb_phys_addr)
+{
+ hw_status status = RET_OK;
+ u32 loadTTB;
+
+ /*Check the input Parameters*/
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+
+ loadTTB = ttb_phys_addr & ~0x7FUL;
+ /* write values to register */
+ MMUMMU_TTBWriteRegister32(base_address, loadTTB);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_ttbset);
+
+hw_status hw_mmu_twl_enable(const u32 base_address)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_CNTLTWLEnableWrite32(base_address, HW_SET);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_twl_enable);
+
+hw_status hw_mmu_twl_disable(const u32 base_address)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_CNTLTWLEnableWrite32(base_address, HW_CLEAR);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_twl_disable);
+
+
+hw_status hw_mmu_tlb_flush(const u32 base_address,
+ u32 virtual_addr,
+ u32 page_size)
+{
+ hw_status status = RET_OK;
+ u32 virt_addr_tag;
+ enum hw_mmu_pgsiz_t pg_sizeBits;
+
+ switch (page_size) {
+ case HW_PAGE_SIZE_4KB:
+ pg_sizeBits = HW_MMU_SMALL_PAGE;
+ break;
+
+ case HW_PAGE_SIZE_64KB:
+ pg_sizeBits = HW_MMU_LARGE_PAGE;
+ break;
+
+ case HW_PAGE_SIZE_1MB:
+ pg_sizeBits = HW_MMU_SECTION;
+ break;
+
+ case HW_PAGE_SIZE_16MB:
+ pg_sizeBits = HW_MMU_SUPERSECTION;
+ break;
+
+ default:
+ return RET_FAIL;
+ }
+
+ /* Generate the 20-bit tag from virtual address */
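+ /* e.g. a virtual address of 0xA0001000 yields the tag 0xA0001 */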
+ virt_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
+
+ mme_set_cam_entry(base_address, pg_sizeBits, 0, 0, virt_addr_tag);
+
+ mmu_flsh_entry(base_address);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_tlb_flush);
+
+
+hw_status hw_mmu_tlb_add(const u32 base_address,
+ u32 physical_addr,
+ u32 virtual_addr,
+ u32 page_size,
+ u32 entryNum,
+ struct hw_mmu_map_attrs_t *map_attrs,
+ enum hw_set_clear_t preserve_bit,
+ enum hw_set_clear_t valid_bit)
+{
+ hw_status status = RET_OK;
+ u32 lockReg;
+ u32 virt_addr_tag;
+ enum hw_mmu_pgsiz_t mmu_pg_size;
+
+ /*Check the input Parameters*/
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+ CHECK_INPUT_RANGE_MIN0(page_size, MMU_PAGE_MAX, RET_PARAM_OUT_OF_RANGE,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+ CHECK_INPUT_RANGE_MIN0(map_attrs->element_size,
+ MMU_ELEMENTSIZE_MAX, RET_PARAM_OUT_OF_RANGE,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+
+ switch (page_size) {
+ case HW_PAGE_SIZE_4KB:
+ mmu_pg_size = HW_MMU_SMALL_PAGE;
+ break;
+
+ case HW_PAGE_SIZE_64KB:
+ mmu_pg_size = HW_MMU_LARGE_PAGE;
+ break;
+
+ case HW_PAGE_SIZE_1MB:
+ mmu_pg_size = HW_MMU_SECTION;
+ break;
+
+ case HW_PAGE_SIZE_16MB:
+ mmu_pg_size = HW_MMU_SUPERSECTION;
+ break;
+
+ default:
+ return RET_FAIL;
+ }
+
+ lockReg = mmu_lckread_reg_32(base_address);
+
+ /* Generate the 20-bit tag from virtual address */
+ virt_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
+
+ /* Write the fields in the CAM Entry Register */
+ mme_set_cam_entry(base_address, mmu_pg_size, preserve_bit, valid_bit,
+ virt_addr_tag);
+
+ /* Write the different fields of the RAM Entry Register */
+ /* endianism of the page,Element Size of the page (8, 16, 32, 64 bit) */
+ mmu_set_ram_entry(base_address, physical_addr,
+ map_attrs->endianism, map_attrs->element_size, map_attrs->mixedSize);
+
+ /* Update the MMU Lock Register */
+ /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
+ mmu_lck_crnt_vctmwite32(base_address, entryNum);
+
+ /* Enable loading of an entry in TLB by writing 1 into LD_TLB_REG
+ register */
+ mmu_ld_tlbwrt_reg32(base_address, MMU_LOAD_TLB);
+
+
+ mmu_lck_write_reg32(base_address, lockReg);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_tlb_add);
+
+
+
+hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
+ u32 physical_addr,
+ u32 virtual_addr,
+ u32 page_size,
+ struct hw_mmu_map_attrs_t *map_attrs)
+{
+ hw_status status = RET_OK;
+ u32 pte_addr, pte_val;
+ long int num_entries = 1;
+
+ switch (page_size) {
+
+ case HW_PAGE_SIZE_4KB:
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, virtual_addr &
+ MMU_SMALL_PAGE_MASK);
+ pte_val = ((physical_addr & MMU_SMALL_PAGE_MASK) |
+ (map_attrs->endianism << 9) |
+ (map_attrs->element_size << 4) |
+ (map_attrs->mixedSize << 11) | 2
+ );
+ break;
+
+ case HW_PAGE_SIZE_64KB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, virtual_addr &
+ MMU_LARGE_PAGE_MASK);
+ pte_val = ((physical_addr & MMU_LARGE_PAGE_MASK) |
+ (map_attrs->endianism << 9) |
+ (map_attrs->element_size << 4) |
+ (map_attrs->mixedSize << 11) | 1
+ );
+ break;
+
+ case HW_PAGE_SIZE_1MB:
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
+ MMU_SECTION_ADDR_MASK);
+ pte_val = ((((physical_addr & MMU_SECTION_ADDR_MASK) |
+ (map_attrs->endianism << 15) |
+ (map_attrs->element_size << 10) |
+ (map_attrs->mixedSize << 17)) &
+ ~0x40000) | 0x2
+ );
+ break;
+
+ case HW_PAGE_SIZE_16MB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
+ MMU_SSECTION_ADDR_MASK);
+ pte_val = (((physical_addr & MMU_SSECTION_ADDR_MASK) |
+ (map_attrs->endianism << 15) |
+ (map_attrs->element_size << 10) |
+ (map_attrs->mixedSize << 17)
+ ) | 0x40000 | 0x2
+ );
+ break;
+
+ case HW_MMU_COARSE_PAGE_SIZE:
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
+ MMU_SECTION_ADDR_MASK);
+ pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
+ break;
+
+ default:
+ return RET_FAIL;
+ }
+
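+ /*
+ * 64KB large pages and 16MB supersections must be replicated across 16
+ * consecutive descriptors, as the ARM page-table format requires;
+ * num_entries is 16 for those cases above.
+ */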
+ while (--num_entries >= 0)
+ ((u32 *)pte_addr)[num_entries] = pte_val;
+
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_pte_set);
+
+hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
+ u32 virtual_addr,
+ u32 pg_size)
+{
+ hw_status status = RET_OK;
+ u32 pte_addr;
+ long int num_entries = 1;
+
+ switch (pg_size) {
+ case HW_PAGE_SIZE_4KB:
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+ virtual_addr & MMU_SMALL_PAGE_MASK);
+ break;
+
+ case HW_PAGE_SIZE_64KB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+ virtual_addr & MMU_LARGE_PAGE_MASK);
+ break;
+
+ case HW_PAGE_SIZE_1MB:
+ case HW_MMU_COARSE_PAGE_SIZE:
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+ virtual_addr & MMU_SECTION_ADDR_MASK);
+ break;
+
+ case HW_PAGE_SIZE_16MB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+ virtual_addr & MMU_SSECTION_ADDR_MASK);
+ break;
+
+ default:
+ return RET_FAIL;
+ }
+
+ while (--num_entries >= 0)
+ ((u32 *)pte_addr)[num_entries] = 0;
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_pte_clear);
+
+/*
+* function: mmu_flsh_entry
+*/
+static hw_status mmu_flsh_entry(const u32 base_address)
+{
+ hw_status status = RET_OK;
+ u32 flushEntryData = 0x1;
+
+ /*Check the input Parameters*/
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+
+ /* write values to register */
+ MMUMMU_FLUSH_ENTRYWriteRegister32(base_address, flushEntryData);
+
+ return status;
+}
+EXPORT_SYMBOL(mmu_flsh_entry);
+/*
+* function : mme_set_cam_entry
+*/
+static hw_status mme_set_cam_entry(const u32 base_address,
+ const u32 page_size,
+ const u32 preserve_bit,
+ const u32 valid_bit,
+ const u32 virt_addr_tag)
+{
+ hw_status status = RET_OK;
+ u32 mmuCamReg;
+
+ /*Check the input Parameters*/
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+
+ mmuCamReg = (virt_addr_tag << 12);
+ mmuCamReg = (mmuCamReg) | (page_size) | (valid_bit << 2)
+ | (preserve_bit << 3);
+
+ /* write values to register */
+ MMUMMU_CAMWriteRegister32(base_address, mmuCamReg);
+
+ return status;
+}
+EXPORT_SYMBOL(mme_set_cam_entry);
+/*
+* function: mmu_set_ram_entry
+*/
+static hw_status mmu_set_ram_entry(const u32 base_address,
+ const u32 physical_addr,
+ enum hw_endianism_t endianism,
+ enum hw_elemnt_siz_t element_size,
+ enum hw_mmu_mixed_size_t mixedSize)
+{
+ hw_status status = RET_OK;
+ u32 mmuRamReg;
+
+ /*Check the input Parameters*/
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+ CHECK_INPUT_RANGE_MIN0(element_size, MMU_ELEMENTSIZE_MAX,
+ RET_PARAM_OUT_OF_RANGE,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+
+
+ mmuRamReg = (physical_addr & MMU_ADDR_MASK);
+ mmuRamReg = (mmuRamReg) | ((endianism << 9) | (element_size << 7)
+ | (mixedSize << 6));
+
+ /* write values to register */
+ MMUMMU_RAMWriteRegister32(base_address, mmuRamReg);
+
+ return status;
+
+}
+EXPORT_SYMBOL(mmu_set_ram_entry);
+
+u32 hw_mmu_fault_dump(const u32 base_address)
+{
+ u32 reg;
+
+ reg = MMUMMU_FAULT_ADReadRegister32(base_address);
+ printk(KERN_INFO "Fault Address Register = 0x%x\n", reg);
+ reg = MMUMMU_FAULT_PCReadRegister32(base_address);
+ printk(KERN_INFO "Fault PC Register Address = 0x%x\n", reg);
+ reg = MMUMMU_FAULT_STATUSReadRegister32(base_address);
+ printk(KERN_INFO "Fault PC address doesn't show the right value on DUCATI "
+ "because of a HW limitation\n");
+ printk(KERN_INFO "Fault Status Register = 0x%x\n", reg);
+ reg = MMUMMU_FAULT_EMUAddressReadRegister32(base_address);
+ printk(KERN_INFO "Fault EMU Address = 0x%x\n", reg);
+ return 0;
+}
+EXPORT_SYMBOL(hw_mmu_fault_dump);
+
+long hw_mmu_tlb_dump(const u32 base_address, bool shw_inv_entries)
+{
+ u32 i;
+ u32 lockSave;
+ u32 cam;
+ u32 ram;
+
+
+ /* Save off the lock register contents,
+ we'll restore it when we are done */
+
+ lockSave = mmu_lckread_reg_32(base_address);
+
+ printk(KERN_INFO "TLB locked entries = %u, current victim = %u\n",
+ ((lockSave & MMU_MMU_LOCK_BaseValue_MASK)
+ >> MMU_MMU_LOCK_BaseValue_OFFSET),
+ ((lockSave & MMU_MMU_LOCK_CurrentVictim_MASK)
+ >> MMU_MMU_LOCK_CurrentVictim_OFFSET));
+ printk(KERN_INFO "=============================================\n");
+ for (i = 0; i < NUM_TLB_ENTRIES; i++) {
+ mmu_lck_crnt_vctmwite32(base_address, i);
+ cam = MMUMMU_CAMReadRegister32(base_address);
+ ram = MMUMMU_RAMReadRegister32(base_address);
+
+ if ((cam & 0x4) != 0) {
+ printk(KERN_INFO "TLB Entry [0x%2x]: VA = 0x%8x "
+ "PA = 0x%8x Protected = 0x%1x\n",
+ i, (cam & MMU_ADDR_MASK), (ram & MMU_ADDR_MASK),
+ (cam & 0x8) ? 1 : 0);
+
+ } else if (shw_inv_entries != false)
+ printk(KERN_ALERT "TLB Entry [0x%x]: <INVALID>\n", i);
+ }
+ mmu_lck_write_reg32(base_address, lockSave);
+ return RET_OK;
+}
+EXPORT_SYMBOL(hw_mmu_tlb_dump);
+
+u32 hw_mmu_pte_phyaddr(u32 pte_val, u32 pte_size)
+{
+ u32 ret_val = 0;
+
+ switch (pte_size) {
+
+ case HW_PAGE_SIZE_4KB:
+ ret_val = pte_val & MMU_SMALL_PAGE_MASK;
+ break;
+ case HW_PAGE_SIZE_64KB:
+ ret_val = pte_val & MMU_LARGE_PAGE_MASK;
+ break;
+
+ case HW_PAGE_SIZE_1MB:
+ ret_val = pte_val & MMU_SECTION_ADDR_MASK;
+ break;
+ case HW_PAGE_SIZE_16MB:
+ ret_val = pte_val & MMU_SSECTION_ADDR_MASK;
+ break;
+ default:
+ /* Invalid */
+ break;
+
+ }
+
+ return ret_val;
+}
+EXPORT_SYMBOL(hw_mmu_pte_phyaddr);
diff --git a/drivers/dsp/syslink/procmgr/proc4430/proc4430.c b/drivers/dsp/syslink/procmgr/proc4430/proc4430.c
new file mode 100644
index 000000000000..528a7aaf44a7
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/proc4430.c
@@ -0,0 +1,1053 @@
+/*
+ * proc4430.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+/* Module level headers */
+#include "../procdefs.h"
+#include "../processor.h"
+#include <procmgr.h>
+#include "../procmgr_drvdefs.h"
+#include "proc4430.h"
+#include "dmm4430.h"
+#include <syslink/multiproc.h>
+#include <syslink/ducatienabler.h>
+#include <syslink/platform_mem.h>
+#include <syslink/atomic_linux.h>
+
+#define DUCATI_DMM_START_ADDR 0xa0000000
+#define DUCATI_DMM_POOL_SIZE 0x6000000
+
+#define SYS_M3 2
+#define APP_M3 3
+#define CORE_PRM_BASE OMAP2_L4_IO_ADDRESS(0x4a306700)
+#define CORE_CM2_DUCATI_CLKSTCTRL OMAP2_L4_IO_ADDRESS(0x4A008900)
+#define CORE_CM2_DUCATI_CLKCTRL OMAP2_L4_IO_ADDRESS(0x4A008920)
+#define RM_MPU_M3_RSTCTRL_OFFSET 0x210
+#define RM_MPU_M3_RSTST_OFFSET 0x214
+#define RM_MPU_M3_RST1 0x1
+#define RM_MPU_M3_RST2 0x2
+#define RM_MPU_M3_RST3 0x4
+
+#define OMAP4430PROC_MODULEID (u16) 0xbbec
+
+/* Macro to make a correct module magic number with refCount */
+#define OMAP4430PROC_MAKE_MAGICSTAMP(x) ((OMAP4430PROC_MODULEID << 12u) | (x))
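+/* e.g. OMAP4430PROC_MAKE_MAGICSTAMP(1) == 0xbbec001: the upper bits
+ * identify the module and the low bits carry the reference count. */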
+
+/*OMAP4430 Module state object */
+struct proc4430_module_object {
+ u32 config_size;
+ /* Size of configuration structure */
+ struct proc4430_config cfg;
+ /* OMAP4430 configuration structure */
+ struct proc4430_config def_cfg;
+ /* Default module configuration */
+ struct proc4430_params def_inst_params;
+ /* Default parameters for the OMAP4430 instances */
+ void *proc_handles[MULTIPROC_MAXPROCESSORS];
+ /* Processor handle array. */
+ struct mutex *gate_handle;
+ /* void * of gate to be used for local thread safety */
+ atomic_t ref_count;
+};
+
+/*
+ OMAP4430 instance object.
+ */
+struct proc4430_object {
+ struct proc4430_params params;
+ /* Instance parameters (configuration values) */
+};
+
+
+/* =================================
+ * Globals
+ * =================================
+ */
+/*
+ OMAP4430 state object variable
+ */
+
+static struct proc4430_module_object proc4430_state = {
+ .config_size = sizeof(struct proc4430_config),
+ .gate_handle = NULL,
+ .def_inst_params.num_mem_entries = 0u,
+ .def_inst_params.mem_entries = NULL,
+ .def_inst_params.reset_vector_mem_entry = 0
+};
+
+
+/* =================================
+ * APIs directly called by applications
+ * =================================
+ */
+/*
+ * Function to get the default configuration for the OMAP4430
+ * module.
+ *
+ * This function can be called by the application to get their
+ * configuration parameter to proc4430_setup filled in by the
+ * OMAP4430 module with the default parameters. If the user
+ * does not wish to make any change in the default parameters, this
+ * API is not required to be called.
+ */
+void proc4430_get_config(struct proc4430_config *cfg)
+{
+ BUG_ON(cfg == NULL);
+ memcpy(cfg, &(proc4430_state.def_cfg),
+ sizeof(struct proc4430_config));
+}
+EXPORT_SYMBOL(proc4430_get_config);
+
+/*
+ * Function to setup the OMAP4430 module.
+ *
+ * This function sets up the OMAP4430 module. This function
+ * must be called before any other instance-level APIs can be
+ * invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then proc4430_get_config can be called to get the
+ * configuration filled with the default values. After this, only
+ * the required configuration values can be changed. If the user
+ * does not wish to make any change in the default parameters, the
+ * application can simply call proc4430_setup with NULL
+ * parameters. The default parameters would get automatically used.
+ */
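+/*
+ * Typical call sequence (illustrative sketch only):
+ *
+ *	struct proc4430_config cfg;
+ *
+ *	proc4430_get_config(&cfg);
+ *	(adjust individual cfg fields here if required)
+ *	proc4430_setup(&cfg);	 or: proc4430_setup(NULL) for the defaults
+ */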
+int proc4430_setup(struct proc4430_config *cfg)
+{
+ int retval = 0;
+ struct proc4430_config tmp_cfg;
+ atomic_cmpmask_and_set(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&proc4430_state.ref_count) !=
+ OMAP4430PROC_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ proc4430_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ dmm_create();
+ dmm_create_tables(DUCATI_DMM_START_ADDR, DUCATI_DMM_POOL_SIZE);
+
+ /* Create a default gate handle for local module protection. */
+ proc4430_state.gate_handle =
+ kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (proc4430_state.gate_handle == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ mutex_init(proc4430_state.gate_handle);
+
+ /* Initialize the name to handles mapping array. */
+ memset(&proc4430_state.proc_handles, 0,
+ (sizeof(void *) * MULTIPROC_MAXPROCESSORS));
+
+ /* Copy the user provided values into the state object. */
+ memcpy(&proc4430_state.cfg, cfg,
+ sizeof(struct proc4430_config));
+
+ return 0;
+
+error:
+ atomic_dec_return(&proc4430_state.ref_count);
+ dmm_delete_tables();
+ dmm_destroy();
+
+ return retval;
+}
+EXPORT_SYMBOL(proc4430_setup);
+
+/*
+ * Function to destroy the OMAP4430 module.
+ *
+ * Once this function is called, other OMAP4430 module APIs,
+ * except for the proc4430_get_config API cannot be called
+ * anymore.
+ */
+int proc4430_destroy(void)
+{
+ int retval = 0;
+ u16 i;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (!(atomic_dec_return(&proc4430_state.ref_count)
+ == OMAP4430PROC_MAKE_MAGICSTAMP(0))) {
+
+ retval = 1;
+ goto exit;
+ }
+
+ /* Delete any OMAP4430 instances that have not been
+ * deleted so far.
+ */
+
+ for (i = 0; i < MULTIPROC_MAXPROCESSORS; i++) {
+ if (proc4430_state.proc_handles[i] == NULL)
+ continue;
+ proc4430_delete(&(proc4430_state.proc_handles[i]));
+ }
+
+ /* Check if the gate_handle was created internally. */
+ if (proc4430_state.gate_handle != NULL) {
+ mutex_destroy(proc4430_state.gate_handle);
+ kfree(proc4430_state.gate_handle);
+ }
+
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(proc4430_destroy);
+
+/*=================================================
+ * Function to initialize the parameters for this Processor
+ * instance.
+ */
+void proc4430_params_init(void *handle, struct proc4430_params *params)
+{
+ struct proc4430_object *proc_object = (struct proc4430_object *)handle;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_params_init failed "
+ "Module not initialized");
+ return;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ printk(KERN_ERR "proc4430_params_init failed "
+ "Argument of type proc4430_params * "
+ "is NULL");
+ return;
+ }
+
+ if (handle == NULL)
+ memcpy(params, &(proc4430_state.def_inst_params),
+ sizeof(struct proc4430_params));
+ else
+ memcpy(params, &(proc_object->params),
+ sizeof(struct proc4430_params));
+}
+EXPORT_SYMBOL(proc4430_params_init);
+
+/*===================================================
+ *Function to create an instance of this Processor.
+ *
+ */
+void *proc4430_create(u16 proc_id, const struct proc4430_params *params)
+{
+ struct processor_object *handle = NULL;
+ struct proc4430_object *object = NULL;
+
+ BUG_ON(!IS_VALID_PROCID(proc_id));
+ BUG_ON(params == NULL);
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_create failed "
+ "Module not initialized");
+ goto error;
+ }
+
+ /* Enter critical section protection. */
+ WARN_ON(mutex_lock_interruptible(proc4430_state.gate_handle));
+ if (proc4430_state.proc_handles[proc_id] != NULL) {
+ handle = proc4430_state.proc_handles[proc_id];
+ goto func_end;
+ } else {
+ handle = (struct processor_object *)
+ vmalloc(sizeof(struct processor_object));
+ if (WARN_ON(handle == NULL))
+ goto func_end;
+
+ handle->proc_fxn_table.attach = &proc4430_attach;
+ handle->proc_fxn_table.detach = &proc4430_detach;
+ handle->proc_fxn_table.start = &proc4430_start;
+ handle->proc_fxn_table.stop = &proc4430_stop;
+ handle->proc_fxn_table.read = &proc4430_read;
+ handle->proc_fxn_table.write = &proc4430_write;
+ handle->proc_fxn_table.control = &proc4430_control;
+ handle->proc_fxn_table.translateAddr =
+ &proc4430_translate_addr;
+ handle->proc_fxn_table.map = &proc4430_map;
+ handle->proc_fxn_table.unmap = &proc4430_unmap;
+ handle->proc_fxn_table.procinfo = &proc4430_proc_info;
+ handle->proc_fxn_table.virt_to_phys = &proc4430_virt_to_phys;
+ handle->state = PROC_MGR_STATE_UNKNOWN;
+ handle->object = vmalloc(sizeof(struct proc4430_object));
+ handle->proc_id = proc_id;
+ object = (struct proc4430_object *)handle->object;
+ if (params != NULL) {
+ /* Copy params into instance object. */
+ memcpy(&(object->params), (void *)params,
+ sizeof(struct proc4430_params));
+ }
+ if ((params != NULL) && (params->mem_entries != NULL)
+ && (params->num_mem_entries > 0)) {
+ /* Allocate memory for, and copy mem_entries table*/
+ object->params.mem_entries = vmalloc(sizeof(struct
+ proc4430_mem_entry) *
+ params->num_mem_entries);
+ memcpy(object->params.mem_entries,
+ params->mem_entries,
+ (sizeof(struct proc4430_mem_entry) *
+ params->num_mem_entries));
+ }
+ handle->boot_mode = PROC_MGR_BOOTMODE_NOLOAD;
+ /* Set the handle in the state object. */
+ proc4430_state.proc_handles[proc_id] = handle;
+ }
+
+func_end:
+ mutex_unlock(proc4430_state.gate_handle);
+error:
+ return (void *)handle;
+}
+EXPORT_SYMBOL(proc4430_create);
+
+/*=================================================
+ * Function to delete an instance of this Processor.
+ *
+ * The user provided pointer to the handle is reset after
+ * successful completion of this function.
+ *
+ */
+int proc4430_delete(void **handle_ptr)
+{
+ int retval = 0;
+ struct proc4430_object *object = NULL;
+ struct processor_object *handle;
+
+ BUG_ON(handle_ptr == NULL);
+ BUG_ON(*handle_ptr == NULL);
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_delete failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ handle = (struct processor_object *)(*handle_ptr);
+ BUG_ON(!IS_VALID_PROCID(handle->proc_id));
+ /* Enter critical section protection. */
+ WARN_ON(mutex_lock_interruptible(proc4430_state.gate_handle));
+ /* Reset handle in PwrMgr handle array. */
+ proc4430_state.proc_handles[handle->proc_id] = NULL;
+ /* Free memory used for the OMAP4430 object. */
+ if (handle->object != NULL) {
+ object = (struct proc4430_object *)handle->object;
+ if (object->params.mem_entries != NULL) {
+ vfree(object->params.mem_entries);
+ object->params.mem_entries = NULL;
+ }
+ vfree(handle->object);
+ handle->object = NULL;
+ }
+ /* Free memory used for the Processor object. */
+ vfree(handle);
+ *handle_ptr = NULL;
+ /* Leave critical section protection. */
+ mutex_unlock(proc4430_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc4430_delete);
+
+/*===================================================
+ * Function to open a handle to an instance of this Processor. This
+ * function is called when access to the Processor is required from
+ * a different process.
+ */
+int proc4430_open(void **handle_ptr, u16 proc_id)
+{
+ int retval = 0;
+
+ BUG_ON(handle_ptr == NULL);
+ BUG_ON(!IS_VALID_PROCID(proc_id));
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_open failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ /* Initialize return parameter handle. */
+ *handle_ptr = NULL;
+
+ /* Check if the PwrMgr exists and return the handle if found. */
+ if (proc4430_state.proc_handles[proc_id] == NULL) {
+ retval = -ENODEV;
+ goto func_exit;
+ } else
+ *handle_ptr = proc4430_state.proc_handles[proc_id];
+func_exit:
+ return retval;
+}
+EXPORT_SYMBOL(proc4430_open);
+
+/*===============================================
+ * Function to close a handle to an instance of this Processor.
+ *
+ */
+int proc4430_close(void *handle)
+{
+ int retval = 0;
+
+ BUG_ON(handle == NULL);
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_close failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ /* nothing to be done for now */
+ return retval;
+}
+EXPORT_SYMBOL(proc4430_close);
+
+/* =================================
+ * APIs called by Processor module (part of function table interface)
+ * =================================
+ */
+/*================================
+ * Function to initialize the slave processor
+ *
+ */
+int proc4430_attach(void *handle, struct processor_attach_params *params)
+{
+ int retval = 0;
+
+ struct processor_object *proc_handle = NULL;
+ struct proc4430_object *object = NULL;
+ u32 map_count = 0;
+ u32 i;
+ memory_map_info map_info;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_attach failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(handle == NULL)) {
+ printk(KERN_ERR "proc4430_attach failed "
+ "Driver handle is NULL");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ printk(KERN_ERR "proc4430_attach failed "
+ "Argument processor_attach_params * is NULL");
+ return -EINVAL;
+ }
+
+ proc_handle = (struct processor_object *)handle;
+
+ object = (struct proc4430_object *)proc_handle->object;
+ /* Return memory information in params. */
+ for (i = 0; (i < object->params.num_mem_entries); i++) {
+ /* If the configured master virtual address is invalid, get the
+ * actual address by mapping the physical address into master
+ * kernel memory space.
+ */
+ if ((object->params.mem_entries[i].master_virt_addr == (u32)-1)
+ && (object->params.mem_entries[i].shared == true)) {
+ map_info.src = object->params.mem_entries[i].phys_addr;
+ map_info.size = object->params.mem_entries[i].size;
+ map_info.is_cached = false;
+ retval = platform_mem_map(&map_info);
+ if (retval != 0) {
+ printk(KERN_ERR "proc4430_attach failed\n");
+ return -EFAULT;
+ }
+ map_count++;
+ object->params.mem_entries[i].master_virt_addr =
+ map_info.dst;
+ params->mem_entries[i].addr
+ [PROC_MGR_ADDRTYPE_MASTERKNLVIRT] =
+ map_info.dst;
+ params->mem_entries[i].addr
+ [PROC_MGR_ADDRTYPE_SLAVEVIRT] =
+ (object->params.mem_entries[i].slave_virt_addr);
+ /* User virtual will be filled by user side. For now,
+ fill in the physical address so that it can be used
+ by mmap to remap this region into user-space */
+ params->mem_entries[i].addr
+ [PROC_MGR_ADDRTYPE_MASTERUSRVIRT] = \
+ object->params.mem_entries[i].phys_addr;
+ params->mem_entries[i].size =
+ object->params.mem_entries[i].size;
+ }
+ }
+ params->num_mem_entries = map_count;
+ return retval;
+}
+
+
+/*==========================================
+ * Function to detach from the Processor.
+ *
+ */
+int proc4430_detach(void *handle)
+{
+ struct processor_object *proc_handle = NULL;
+ struct proc4430_object *object = NULL;
+ u32 i;
+ memory_unmap_info unmap_info;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+
+ printk(KERN_ERR "proc4430_detach failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(handle == NULL)) {
+ printk(KERN_ERR "proc4430_detach failed "
+ "Argument Driverhandle is NULL");
+ return -EINVAL;
+ }
+
+ proc_handle = (struct processor_object *)handle;
+ object = (struct proc4430_object *)proc_handle->object;
+ for (i = 0; (i < object->params.num_mem_entries); i++) {
+ if ((object->params.mem_entries[i].master_virt_addr > 0)
+ && (object->params.mem_entries[i].shared == true)) {
+ unmap_info.addr =
+ object->params.mem_entries[i].master_virt_addr;
+ unmap_info.size = object->params.mem_entries[i].size;
+ platform_mem_unmap(&unmap_info);
+ object->params.mem_entries[i].master_virt_addr =
+ (u32)-1;
+ }
+ }
+ return 0;
+}
+
+/*==========================================
+ * Function to start the slave processor
+ *
+ * Start the slave processor running from its entry point.
+ * Depending on the boot mode, this involves configuring the boot
+ * address and releasing the slave from reset.
+ *
+ */
+int proc4430_start(void *handle, u32 entry_pt,
+ struct processor_start_params *start_params)
+{
+ u32 reg;
+ int counter = 10;
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+
+ printk(KERN_ERR "proc4430_start failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ /*FIXME: Remove handle and entry_pt if not used */
+ if (WARN_ON(start_params == NULL)) {
+ printk(KERN_ERR "proc4430_start failed "
+ "Argument processor_start_params * is NULL");
+ return -EINVAL;
+ }
+
+ reg = __raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET);
+ printk(KERN_INFO "proc4430_start: Reset Status [0x%x]", reg);
+ reg = __raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ printk(KERN_INFO "proc4430_start: Reset Control [0x%x]", reg);
+
+ switch (start_params->params->proc_id) {
+ case SYS_M3:
+ /* Module is managed automatically by HW */
+ __raw_writel(0x01, CORE_CM2_DUCATI_CLKCTRL);
+ /* Enable the M3 clock */
+ __raw_writel(0x02, CORE_CM2_DUCATI_CLKSTCTRL);
+ do {
+ reg = __raw_readl(CORE_CM2_DUCATI_CLKSTCTRL);
+ if (reg & 0x100) {
+				printk(KERN_INFO "M3 clock enabled: "
+				"CORE_CM2_DUCATI_CLKSTCTRL = 0x%x\n", reg);
+ break;
+ }
+ msleep(1);
+ } while (--counter);
+ if (counter == 0) {
+ printk(KERN_ERR "FAILED TO ENABLE DUCATI M3 CLOCK !\n");
+ return -EFAULT;
+ }
+ /* Check that releasing resets would indeed be effective */
+ reg = __raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ if (reg != 7) {
+			printk(KERN_ERR "proc4430_start: Resets not in proper state!\n");
+ __raw_writel(0x7,
+ CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ }
+
+ /* De-assert RST3, and clear the Reset status */
+ printk(KERN_INFO "De-assert RST3\n");
+ __raw_writel(0x3, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ while (!(__raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET)
+ & 0x4))
+ ;
+ printk(KERN_INFO "RST3 released!");
+ __raw_writel(0x4, CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET);
+ ducati_setup();
+
+ /* De-assert RST1, and clear the Reset status */
+ printk(KERN_INFO "De-assert RST1\n");
+ __raw_writel(0x2, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ while (!(__raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET)
+ & 0x1))
+ ;
+ printk(KERN_INFO "RST1 released!");
+ __raw_writel(0x1, CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET);
+ break;
+ case APP_M3:
+ /* De-assert RST2, and clear the Reset status */
+ printk(KERN_INFO "De-assert RST2\n");
+ __raw_writel(0x0, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ while (!(__raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET)
+ & 0x2))
+ ;
+ printk(KERN_INFO "RST2 released!");
+ __raw_writel(0x2, CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET);
+ break;
+ default:
+		printk(KERN_ERR "proc4430_start: invalid proc_id\n");
+ break;
+ }
+ return 0;
+}
+
+
+/*
+ * Function to stop the slave processor
+ *
+ * Stop the execution of the slave processor. Depending on the boot
+ * mode, this may result in placing the slave processor in reset.
+ *
+ * @param handle void * to the Processor instance
+ *
+ * @sa proc4430_start
+ */
+int
+proc4430_stop(void *handle, struct processor_stop_params *stop_params)
+{
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_stop failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+ switch (stop_params->params->proc_id) {
+ case SYS_M3:
+ ducati_destroy();
+ printk(KERN_INFO "Assert RST1 and RST2 and RST3\n");
+ __raw_writel(0x7, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ /* Disable the M3 clock */
+ __raw_writel(0x01, CORE_CM2_DUCATI_CLKSTCTRL);
+ break;
+ case APP_M3:
+ printk(KERN_INFO "Assert RST2\n");
+ __raw_writel(0x2, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ break;
+ default:
+		printk(KERN_ERR "proc4430_stop: invalid proc_id\n");
+ break;
+ }
+ return 0;
+}
+
+
+/*==============================================
+ * Function to read from the slave processor's memory.
+ *
+ * Read from the slave processor's memory and copy into the
+ * provided buffer.
+ */
+int proc4430_read(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer)
+{
+ int retval = 0;
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_read failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ /* TODO */
+ return retval;
+}
+
+
+/*==============================================
+ * Function to write into the slave processor's memory.
+ *
+ * Read from the provided buffer and copy into the slave
+ * processor's memory.
+ *
+ */
+int proc4430_write(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer)
+{
+ int retval = 0;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_write failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ /* TODO */
+ return retval;
+}
+
+
+/*=========================================================
+ * Function to perform device-dependent operations.
+ *
+ * Performs device-dependent control operations as exposed by this
+ * implementation of the Processor module.
+ */
+int proc4430_control(void *handle, int cmd, void *arg)
+{
+ int retval = 0;
+
+ /*FIXME: Remove handle,etc if not used */
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_control failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ return retval;
+}
+
+
+/*=====================================================
+ * Function to translate between two types of address spaces.
+ *
+ * Translate between the specified address spaces.
+ */
+int proc4430_translate_addr(void *handle,
+ void **dst_addr, enum proc_mgr_addr_type dst_addr_type,
+ void *src_addr, enum proc_mgr_addr_type src_addr_type)
+{
+ int retval = 0;
+ struct processor_object *proc_handle = NULL;
+ struct proc4430_object *object = NULL;
+ struct proc4430_mem_entry *entry = NULL;
+ bool found = false;
+ u32 fm_addr_base = (u32)NULL;
+ u32 to_addr_base = (u32)NULL;
+ u32 i;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_translate_addr failed "
+ "Module not initialized");
+ retval = -ENODEV;
+ goto error_exit;
+ }
+
+ if (WARN_ON(handle == NULL)) {
+ retval = -EINVAL;
+ goto error_exit;
+ }
+ if (WARN_ON(dst_addr == NULL)) {
+ retval = -EINVAL;
+ goto error_exit;
+ }
+	if (WARN_ON(dst_addr_type >= PROC_MGR_ADDRTYPE_ENDVALUE)) {
+ retval = -EINVAL;
+ goto error_exit;
+ }
+ if (WARN_ON(src_addr == NULL)) {
+ retval = -EINVAL;
+ goto error_exit;
+ }
+	if (WARN_ON(src_addr_type >= PROC_MGR_ADDRTYPE_ENDVALUE)) {
+ retval = -EINVAL;
+ goto error_exit;
+ }
+
+ proc_handle = (struct processor_object *)handle;
+ object = (struct proc4430_object *)proc_handle->object;
+ *dst_addr = NULL;
+ for (i = 0 ; i < object->params.num_mem_entries ; i++) {
+ entry = &(object->params.mem_entries[i]);
+ fm_addr_base =
+ (src_addr_type == PROC_MGR_ADDRTYPE_MASTERKNLVIRT) ?
+ entry->master_virt_addr : entry->slave_virt_addr;
+ to_addr_base =
+ (dst_addr_type == PROC_MGR_ADDRTYPE_MASTERKNLVIRT) ?
+ entry->master_virt_addr : entry->slave_virt_addr;
+		/* Determine which way to convert */
+ if (((u32)src_addr < (fm_addr_base + entry->size)) &&
+ ((u32)src_addr >= fm_addr_base)) {
+ found = true;
+ *dst_addr = (void *)(((u32)src_addr - fm_addr_base)
+ + to_addr_base);
+ break;
+ }
+ }
+
+	/* This check must not be removed even with build optimizations. */
+ if (WARN_ON(found == false)) {
+ /*Failed to translate address. */
+ retval = -ENXIO;
+ goto error_exit;
+ }
+ return 0;
+
+error_exit:
+ return retval;
+}
+
+
+/*=================================================
+ * Function to map slave address to host address space
+ *
+ * Map the provided slave address to master address space. This
+ * function also maps the specified address to slave MMU space.
+ */
+int proc4430_map(void *handle, u32 proc_addr,
+ u32 size, u32 *mapped_addr, u32 *mapped_size, u32 map_attribs)
+{
+ int retval = 0;
+ u32 da_align;
+ u32 da;
+ u32 va_align;
+ u32 size_align;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_map failed "
+ "Module not initialized");
+ retval = -ENODEV;
+ goto error_exit;
+ }
+
+ /*FIXME: Remove handle,etc if not used */
+
+ /* FIX ME: Temporary work around until the dynamic memory mapping
+ * for Tiler address space is available
+ */
+ if ((map_attribs & DSP_MAPTILERADDR)) {
+ da_align = user_va2pa(current->mm, proc_addr);
+ *mapped_addr = (da_align | (proc_addr & (PAGE_SIZE - 1)));
+		printk(KERN_INFO "proc4430_map - tiler: user_va2pa: mapped_addr "
+			"= 0x%x\n", *mapped_addr);
+ return 0;
+ }
+
+ /* Calculate the page-aligned PA, VA and size */
+ va_align = PG_ALIGN_LOW(proc_addr, PAGE_SIZE);
+ size_align = PG_ALIGN_HIGH(size + (u32)proc_addr - va_align, PAGE_SIZE);
+
+ dmm_reserve_memory(size_align, &da);
+ da_align = PG_ALIGN_LOW((u32)da, PAGE_SIZE);
+ retval = ducati_mem_map(va_align, da_align, size_align, map_attribs);
+
+ /* Mapped address = MSB of DA | LSB of VA */
+ *mapped_addr = (da_align | (proc_addr & (PAGE_SIZE - 1)));
+
+error_exit:
+ return retval;
+}
+
+/*=================================================
+ * Function to unmap slave address to host address space
+ *
+ * UnMap the provided slave address to master address space. This
+ * function also unmaps the specified address to slave MMU space.
+ */
+int proc4430_unmap(void *handle, u32 mapped_addr)
+{
+ int da_align;
+ int ret_val = 0;
+ int size_align;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+		printk(KERN_ERR "proc4430_unmap failed "
+			"Module not initialized");
+		ret_val = -ENODEV;
+ goto error_exit;
+ }
+
+ /*FIXME: Remove handle,etc if not used */
+
+ da_align = PG_ALIGN_LOW((u32)mapped_addr, PAGE_SIZE);
+ ret_val = dmm_unreserve_memory(da_align, &size_align);
+ if (WARN_ON(ret_val < 0))
+ goto error_exit;
+ ret_val = ducati_mem_unmap(da_align, size_align);
+ if (WARN_ON(ret_val < 0))
+ goto error_exit;
+ return 0;
+
+error_exit:
+ printk(KERN_WARNING "proc4430_unmap failed !!!!\n");
+ return ret_val;
+}
+
+/*=================================================
+ * Function to return list of translated mem entries
+ *
+ * This function takes the remote processor address as
+ * an input and returns the mapped Page entries in the
+ * buffer passed
+ */
+int proc4430_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries)
+{
+ int da_align;
+ int i;
+ int ret_val = 0;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_virt_to_phys failed "
+ "Module not initialized");
+ ret_val = -EFAULT;
+ goto error_exit;
+ }
+
+ if (handle == NULL || mapped_entries == NULL || num_of_entries == 0) {
+ ret_val = -EFAULT;
+ goto error_exit;
+ }
+ da_align = PG_ALIGN_LOW((u32)da, PAGE_SIZE);
+ for (i = 0; i < num_of_entries; i++) {
+ mapped_entries[i] = ducati_mem_virtToPhys(da_align);
+ da_align += PAGE_SIZE;
+ }
+ return 0;
+
+error_exit:
+ printk(KERN_WARNING "proc4430_virtToPhys failed !!!!\n");
+ return ret_val;
+}
+
+
+/*=================================================
+ * Function to return PROC4430 mem_entries info
+ *
+ */
+int proc4430_proc_info(void *handle, struct proc_mgr_proc_info *procinfo)
+{
+ struct processor_object *proc_handle = NULL;
+ struct proc4430_object *object = NULL;
+ struct proc4430_mem_entry *entry = NULL;
+ int i;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_proc_info failed "
+ "Module not initialized");
+ goto error_exit;
+ }
+
+ if (WARN_ON(handle == NULL))
+ goto error_exit;
+ if (WARN_ON(procinfo == NULL))
+ goto error_exit;
+
+ proc_handle = (struct processor_object *)handle;
+
+ object = (struct proc4430_object *)proc_handle->object;
+
+ for (i = 0 ; i < object->params.num_mem_entries ; i++) {
+ entry = &(object->params.mem_entries[i]);
+ procinfo->mem_entries[i].addr[PROC_MGR_ADDRTYPE_MASTERKNLVIRT]
+ = entry->master_virt_addr;
+ procinfo->mem_entries[i].addr[PROC_MGR_ADDRTYPE_SLAVEVIRT]
+ = entry->slave_virt_addr;
+ procinfo->mem_entries[i].size = entry->size;
+ }
+ procinfo->num_mem_entries = object->params.num_mem_entries;
+ procinfo->boot_mode = proc_handle->boot_mode;
+ return 0;
+
+error_exit:
+ printk(KERN_WARNING "proc4430_proc_info failed !!!!\n");
+ return -EFAULT;
+}
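
The proc4430_* functions above implement the OMAP4430 side of the generic
processor life cycle: attach maps the shared memory entries, start releases
the Ducati M3 subsystem from reset, stop places it back in reset, and detach
unmaps the shared regions again. A minimal kernel-side sketch of that call
order follows; the caller and the already-populated parameter structures are
assumptions for illustration, not part of this patch.

    #include "proc4430.h"

    /* Hypothetical caller: drives one proc4430 handle through its life cycle.
     * Assumes the ProcMgr layer has already filled in the parameter structs. */
    static int proc4430_lifecycle_sketch(void *handle,
            struct processor_attach_params *attach_params,
            struct processor_start_params *start_params,
            struct processor_stop_params *stop_params)
    {
        int status;

        status = proc4430_attach(handle, attach_params); /* map shared regions */
        if (status < 0)
            return status;

        /* proc4430_start() currently ignores the entry point, so pass 0 */
        status = proc4430_start(handle, 0, start_params);
        if (status < 0)
            goto detach;

        /* ... the slave runs; read/write/control would be used here ... */

        proc4430_stop(handle, stop_params); /* put the M3 back in reset */
    detach:
        proc4430_detach(handle); /* unmap the shared regions */
        return status;
    }
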
diff --git a/drivers/dsp/syslink/procmgr/proc4430/proc4430.h b/drivers/dsp/syslink/procmgr/proc4430/proc4430.h
new file mode 100755
index 000000000000..5903daeadaa3
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/proc4430.h
@@ -0,0 +1,147 @@
+/*
+ * proc4430.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+
+
+#ifndef _SYSLINK_PROC_4430_H_
+#define _SYSLINK_PROC_4430_H_
+
+
+/* Module headers */
+#include <procmgr.h>
+#include "../procdefs.h"
+#include <linux/types.h>
+
+/*
+ Module configuration structure.
+ */
+struct proc4430_config {
+ struct mutex *gate_handle;
+ /* void * of gate to be used for local thread safety */
+};
+
+/*
+ Memory entry for slave memory map configuration
+ */
+struct proc4430_mem_entry {
+ char name[PROCMGR_MAX_STRLEN];
+ /* Name identifying the memory region. */
+ u32 phys_addr;
+ /* Physical address of the memory region. */
+ u32 slave_virt_addr;
+ /* Slave virtual address of the memory region. */
+ u32 master_virt_addr;
+ /* Master virtual address of the memory region. If specified as -1,
+ * the master virtual address is assumed to be invalid, and shall be
+ * set internally within the Processor module. */
+ u32 size;
+ /* Size (in bytes) of the memory region. */
+ bool shared;
+ /* Flag indicating whether the memory region is shared between master
+ * and slave. */
+};
+
+/*
+ Configuration parameters specific to this processor.
+ */
+struct proc4430_params {
+ int num_mem_entries;
+ /* Number of memory regions to be configured. */
+ struct proc4430_mem_entry *mem_entries;
+ /* Array of information structures for memory regions
+ * to be configured. */
+ u32 reset_vector_mem_entry;
+ /* Index of the memory entry within the mem_entries array,
+ * which is the resetVector memory region. */
+};
+
+
+/* Function to initialize the slave processor */
+int proc4430_attach(void *handle, struct processor_attach_params *params);
+
+/* Function to finalize the slave processor */
+int proc4430_detach(void *handle);
+
+/* Function to start the slave processor */
+int proc4430_start(void *handle, u32 entry_pt,
+ struct processor_start_params *params);
+
+/* Function to stop the slave processor */
+int proc4430_stop(void *handle,
+ struct processor_stop_params *params);
+
+/* Function to read from the slave processor's memory. */
+int proc4430_read(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer);
+
+/* Function to write into the slave processor's memory. */
+int proc4430_write(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer);
+
+/* Function to perform device-dependent operations. */
+int proc4430_control(void *handle, int cmd, void *arg);
+
+/* Function to translate between two types of address spaces. */
+int proc4430_translate_addr(void *handle, void **dst_addr,
+ enum proc_mgr_addr_type dst_addr_type,
+ void *src_addr, enum proc_mgr_addr_type src_addr_type);
+
+/* Function to map slave address to host address space */
+int proc4430_map(void *handle, u32 proc_addr, u32 size, u32 *mapped_addr,
+ u32 *mapped_size, u32 map_attribs);
+
+/* Function to unmap the slave address to host address space */
+int proc4430_unmap(void *handle, u32 mapped_addr);
+
+/* Function to retrieve physical address translations */
+int proc4430_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries);
+
+/* =================================================
+ * APIs
+ * =================================================
+ */
+
+/* Function to get the default configuration for the OMAP4430PROC module */
+void proc4430_get_config(struct proc4430_config *cfg);
+
+/* Function to setup the OMAP4430PROC module. */
+int proc4430_setup(struct proc4430_config *cfg);
+
+/* Function to destroy the OMAP4430PROC module. */
+int proc4430_destroy(void);
+
+/* Function to initialize the parameters for this processor instance. */
+void proc4430_params_init(void *handle,
+ struct proc4430_params *params);
+
+/* Function to create an instance of this processor. */
+void *proc4430_create(u16 proc_id, const struct proc4430_params *params);
+
+/* Function to delete an instance of this processor. */
+int proc4430_delete(void **handle_ptr);
+
+/* Function to open an instance of this processor. */
+int proc4430_open(void **handle_ptr, u16 proc_id);
+
+/* Function to close an instance of this processor. */
+int proc4430_close(void *handle);
+
+/* Function to get the proc info */
+int proc4430_proc_info(void *handle, struct proc_mgr_proc_info *procinfo);
+
+#endif
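
The header above is exercised in a fixed order: proc4430_get_config() and
proc4430_setup() initialize the module, proc4430_params_init() fills in the
default instance parameters, and proc4430_create() builds an instance from a
table of memory entries. The sketch below illustrates that flow; the region
name, addresses and sizes are made-up values, and passing a NULL handle to
proc4430_params_init() to request the defaults is an assumption.

    #include "proc4430.h"

    /* One illustrative shared region; every value here is an assumption. */
    static struct proc4430_mem_entry demo_entry = {
        .name               = "DUCATI_SHM",
        .phys_addr          = 0x9cf00000,   /* assumed physical base */
        .slave_virt_addr    = 0xa0000000,   /* assumed Ducati-side view */
        .master_virt_addr   = (u32)-1,      /* let the module map it */
        .size               = 0x100000,
        .shared             = true,
    };

    /* Hypothetical bring-up of one OMAP4430 processor instance. */
    static void *proc4430_demo_create(u16 proc_id)
    {
        struct proc4430_config cfg;
        struct proc4430_params params;

        proc4430_get_config(&cfg);              /* start from the defaults */
        if (proc4430_setup(&cfg) < 0)
            return NULL;

        proc4430_params_init(NULL, &params);    /* NULL: default params (assumed) */
        params.num_mem_entries = 1;
        params.mem_entries = &demo_entry;
        params.reset_vector_mem_entry = 0;

        return proc4430_create(proc_id, &params);   /* NULL on failure */
    }
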
diff --git a/drivers/dsp/syslink/procmgr/proc4430/proc4430_drv.c b/drivers/dsp/syslink/procmgr/proc4430/proc4430_drv.c
new file mode 100755
index 000000000000..fada09919052
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/proc4430_drv.c
@@ -0,0 +1,400 @@
+/*
+ * proc4430_drv.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <generated/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+
+
+/* Module headers */
+#include "proc4430.h"
+#include "proc4430_drvdefs.h"
+
+
+
+/** ============================================================================
+ * Macros and types
+ * ============================================================================
+ */
+#define PROC4430_NAME "syslink-proc4430"
+
+static char *driver_name = PROC4430_NAME;
+
+static s32 driver_major;
+
+static s32 driver_minor;
+
+struct proc_4430_dev {
+ struct cdev cdev;
+};
+
+static struct proc_4430_dev *proc_4430_device;
+
+static struct class *proc_4430_class;
+
+
+
+/** ============================================================================
+ * Forward declarations of internal functions
+ * ============================================================================
+ */
+/* Linux driver function to open the driver object. */
+static int proc4430_drv_open(struct inode *inode, struct file *filp);
+
+/* Linux driver function to close the driver object. */
+static int proc4430_drv_release(struct inode *inode, struct file *filp);
+
+/* Linux driver function to invoke the APIs through ioctl. */
+static int proc4430_drv_ioctl(struct inode *inode,
+ struct file *filp, unsigned int cmd,
+ unsigned long args);
+
+/* Linux driver function to map memory regions to user space. */
+static int proc4430_drv_mmap(struct file *filp,
+ struct vm_area_struct *vma);
+
+/* Module initialization function for Linux driver. */
+static int __init proc4430_drv_initializeModule(void);
+
+/* Module finalization function for Linux driver. */
+static void __exit proc4430_drv_finalizeModule(void);
+
+
+
+/** ============================================================================
+ * Globals
+ * ============================================================================
+ */
+
+/*
+ File operations table for PROC4430.
+ */
+static const struct file_operations proc_4430_fops = {
+ .open = proc4430_drv_open,
+ .release = proc4430_drv_release,
+ .ioctl = proc4430_drv_ioctl,
+ .mmap = proc4430_drv_mmap,
+};
+
+static int proc4430_drv_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int proc4430_drv_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+
+/*
+ Linux driver function to invoke the APIs through ioctl.
+ *
+ */
+static int proc4430_drv_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int retval = 0;
+ struct proc_mgr_cmd_args *cmd_args = (struct proc_mgr_cmd_args *)args;
+ struct proc_mgr_cmd_args command_args;
+
+ switch (cmd) {
+ case CMD_PROC4430_GETCONFIG:
+ {
+ struct proc4430_cmd_args_get_config *src_args =
+ (struct proc4430_cmd_args_get_config *)args;
+ struct proc4430_config cfg;
+
+		/* copy_from_user is not needed for
+ * proc4430_get_config, since the
+ * user's config is not used.
+ */
+ proc4430_get_config(&cfg);
+
+ retval = copy_to_user((void *)(src_args->cfg),
+ (const void *)&cfg,
+ sizeof(struct proc4430_config));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ }
+ break;
+
+ case CMD_PROC4430_SETUP:
+ {
+ struct proc4430_cmd_args_setup *src_args =
+ (struct proc4430_cmd_args_setup *)args;
+ struct proc4430_config cfg;
+
+ retval = copy_from_user((void *)&cfg,
+ (const void *)(src_args->cfg),
+ sizeof(struct proc4430_config));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ proc4430_setup(&cfg);
+ }
+ break;
+
+ case CMD_PROC4430_DESTROY:
+ {
+ proc4430_destroy();
+ }
+ break;
+
+ case CMD_PROC4430_PARAMS_INIT:
+ {
+ struct proc4430_cmd_args_params_init src_args;
+ struct proc4430_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc4430_cmd_args_params_init));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ proc4430_params_init(src_args.handle, &params);
+ retval = copy_to_user((void *)(src_args.params),
+ (const void *) &params,
+ sizeof(struct proc4430_params));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ }
+ break;
+
+ case CMD_PROC4430_CREATE:
+ {
+ struct proc4430_cmd_args_create src_args;
+ struct proc4430_params params;
+ struct proc4430_mem_entry *entries = NULL;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc4430_cmd_args_create));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_from_user((void *) &params,
+ (const void *)(src_args.params),
+ sizeof(struct proc4430_params));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ /* Copy the contents of mem_entries from user-side */
+ if (params.num_mem_entries) {
+ entries = vmalloc(params.num_mem_entries * \
+ sizeof(struct proc4430_mem_entry));
+ if (WARN_ON(!entries))
+ goto func_exit;
+ retval = copy_from_user((void *) (entries),
+ (const void *)(params.mem_entries),
+ params.num_mem_entries * \
+ sizeof(struct proc4430_mem_entry));
+ if (WARN_ON(retval < 0)) {
+ vfree(entries);
+ goto func_exit;
+ }
+ params.mem_entries = entries;
+ }
+ src_args.handle = proc4430_create(src_args.proc_id,
+ &params);
+ if (WARN_ON(src_args.handle == NULL))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc4430_cmd_args_create));
+ /* Free the memory created */
+ if (params.num_mem_entries)
+ vfree(entries);
+ }
+ break;
+
+ case CMD_PROC4430_DELETE:
+ {
+ struct proc4430_cmd_args_delete src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc4430_cmd_args_delete));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = proc4430_delete(&(src_args.handle));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROC4430_OPEN:
+ {
+ struct proc4430_cmd_args_open src_args;
+
+ /*Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc4430_cmd_args_open));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = proc4430_open(&(src_args.handle),
+ src_args.proc_id);
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc4430_cmd_args_open));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROC4430_CLOSE:
+ {
+ struct proc4430_cmd_args_close src_args;
+
+ /*Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc4430_cmd_args_close));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = proc4430_close(src_args.handle);
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ default:
+ {
+ printk(KERN_ERR "unsupported ioctl\n");
+ }
+ break;
+ }
+func_exit:
+ /* Set the status and copy the common args to user-side. */
+ command_args.api_status = retval;
+ retval = copy_to_user((void *) cmd_args,
+ (const void *) &command_args,
+ sizeof(struct proc_mgr_cmd_args));
+ WARN_ON(retval < 0);
+ return retval;
+}
+
+
+/*
+ Linux driver function to map memory regions to user space.
+ *
+ */
+static int proc4430_drv_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+
+/** ==================================================================
+ * Functions required for multiple .ko modules configuration
+ * ==================================================================
+ */
+/*
+ Module initialization function for Linux driver.
+ */
+static int __init proc4430_drv_initializeModule(void)
+{
+	dev_t dev = 0;
+ int retval;
+
+ /* Display the version info and created date/time */
+ printk(KERN_INFO "proc4430_drv_initializeModule\n");
+
+ if (driver_major) {
+ dev = MKDEV(driver_major, driver_minor);
+ retval = register_chrdev_region(dev, 1, driver_name);
+ } else {
+ retval = alloc_chrdev_region(&dev, driver_minor, 1,
+ driver_name);
+ driver_major = MAJOR(dev);
+ }
+
+ proc_4430_device = kmalloc(sizeof(struct proc_4430_dev), GFP_KERNEL);
+ if (!proc_4430_device) {
+ retval = -ENOMEM;
+ unregister_chrdev_region(dev, 1);
+ goto exit;
+ }
+ memset(proc_4430_device, 0, sizeof(struct proc_4430_dev));
+ cdev_init(&proc_4430_device->cdev, &proc_4430_fops);
+ proc_4430_device->cdev.owner = THIS_MODULE;
+ proc_4430_device->cdev.ops = &proc_4430_fops;
+
+ retval = cdev_add(&proc_4430_device->cdev, dev, 1);
+
+ if (retval) {
+		printk(KERN_ERR "Failed to add the syslink proc_4430 device\n");
+ goto exit;
+ }
+
+ /* udev support */
+ proc_4430_class = class_create(THIS_MODULE, "syslink-proc4430");
+
+	if (IS_ERR(proc_4430_class)) {
+		printk(KERN_ERR "Error creating the proc_4430 class\n");
+		retval = PTR_ERR(proc_4430_class);
+		goto exit;
+	}
+	device_create(proc_4430_class, NULL, MKDEV(driver_major, driver_minor),
+			NULL, PROC4430_NAME);
+	return 0;
+exit:
+	return retval;
+}
+
+/*
+ function to finalize the driver module.
+ */
+static void __exit proc4430_drv_finalizeModule(void)
+{
+ dev_t devno = 0;
+
+ /* FIX ME: THIS MIGHT NOT BE THE RIGHT PLACE TO CALL THE SETUP */
+ proc4430_destroy();
+
+ devno = MKDEV(driver_major, driver_minor);
+ if (proc_4430_device) {
+ cdev_del(&proc_4430_device->cdev);
+ kfree(proc_4430_device);
+ }
+ unregister_chrdev_region(devno, 1);
+ if (proc_4430_class) {
+ /* remove the device from sysfs */
+ device_destroy(proc_4430_class, MKDEV(driver_major,
+ driver_minor));
+ class_destroy(proc_4430_class);
+ }
+ return;
+}
+
+/*
+ Macro calls that indicate initialization and finalization functions
+ * to the kernel.
+ */
+MODULE_LICENSE("GPL v2");
+module_init(proc4430_drv_initializeModule);
+module_exit(proc4430_drv_finalizeModule);
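
proc4430_drv_mmap() above hands vma->vm_pgoff straight to remap_pfn_range()
with DMA-coherent page protection, so from user space the mmap() offset is
interpreted as a physical address and the mapping comes back uncached. A small
user-space sketch follows; the /dev node name (derived from the device_create()
call) and the physical address are assumptions for illustration.

    #define _FILE_OFFSET_BITS 64    /* physical addresses can exceed 2 GB */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        const off_t shared_pa = 0x9cf00000; /* assumed shared-memory base */
        const size_t len = 0x100000;
        void *va;

        int fd = open("/dev/syslink-proc4430", O_RDWR); /* assumed node name */
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* The driver's mmap passes the offset through as a page frame. */
        va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, shared_pa);
        if (va == MAP_FAILED) {
            perror("mmap");
            close(fd);
            return 1;
        }

        /* ... exchange data with the Ducati side here ... */

        munmap(va, len);
        close(fd);
        return 0;
    }
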
diff --git a/drivers/dsp/syslink/procmgr/proc4430/proc4430_drvdefs.h b/drivers/dsp/syslink/procmgr/proc4430/proc4430_drvdefs.h
new file mode 100755
index 000000000000..4176d731f1d4
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/proc4430_drvdefs.h
@@ -0,0 +1,169 @@
+/*
+ * proc4430_drvdefs.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#ifndef _SYSLINK_PROC4430_H
+#define _SYSLINK_PROC4430_H
+
+
+/* Module headers */
+#include "../procmgr_drvdefs.h"
+#include "proc4430.h"
+
+
+/* ----------------------------------------------------------------------------
+ * IOCTL command IDs for OMAP4430PROC
+ * ----------------------------------------------------------------------------
+ */
+/*
+ * Base command ID for OMAP4430PROC
+ */
+#define PROC4430_BASE_CMD 0x200
+
+/*
+ * Command for PROC4430_getConfig
+ */
+#define CMD_PROC4430_GETCONFIG (PROC4430_BASE_CMD + 1)
+
+/*
+ * Command for PROC4430_setup
+ */
+#define CMD_PROC4430_SETUP (PROC4430_BASE_CMD + 2)
+
+/*
+ * Command for PROC4430_destroy
+ */
+#define CMD_PROC4430_DESTROY (PROC4430_BASE_CMD + 3)
+
+/*
+ * Command for PROC4430_params_init
+ */
+#define CMD_PROC4430_PARAMS_INIT (PROC4430_BASE_CMD + 4)
+
+/*
+ * Command for PROC4430_create
+ */
+#define CMD_PROC4430_CREATE (PROC4430_BASE_CMD + 5)
+
+/*
+ * Command for PROC4430_delete
+ */
+#define CMD_PROC4430_DELETE (PROC4430_BASE_CMD + 6)
+
+/*
+ * Command for PROC4430_open
+ */
+#define CMD_PROC4430_OPEN (PROC4430_BASE_CMD + 7)
+
+/*
+ * Command for PROC4430_close
+ */
+#define CMD_PROC4430_CLOSE (PROC4430_BASE_CMD + 8)
+
+
+/* ---------------------------------------------------
+ * Command arguments for OMAP4430PROC
+ * ---------------------------------------------------
+ */
+/*
+ * Command arguments for PROC4430_getConfig
+ */
+struct proc4430_cmd_args_get_config {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ struct proc4430_config *cfg;
+ /* Pointer to the OMAP4430PROC module configuration structure
+ * in which the default config is to be returned. */
+};
+
+/*
+ * Command arguments for PROC4430_setup
+ */
+struct proc4430_cmd_args_setup {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ struct proc4430_config *cfg;
+ /* Optional OMAP4430PROC module configuration. If provided as NULL,
+ * default configuration is used. */
+};
+
+/*
+ * Command arguments for PROC4430_destroy
+ */
+struct proc4430_cmd_args_destroy {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+};
+
+/*
+ * Command arguments for PROC4430_params_init
+ */
+struct proc4430_cmd_args_params_init {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ void *handle;
+ /* void * to the processor instance. */
+ struct proc4430_params *params;
+ /* Configuration parameters. */
+};
+
+/*
+ * Command arguments for PROC4430_create
+ */
+struct proc4430_cmd_args_create {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ u16 proc_id;
+ /* Processor ID for which this processor instance is required. */
+ struct proc4430_params *params;
+ /*Configuration parameters. */
+ void *handle;
+ /* void * to the created processor instance. */
+};
+
+/*
+ * Command arguments for PROC4430_delete
+ */
+struct proc4430_cmd_args_delete {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ void *handle;
+ /* Pointer to handle to the processor instance */
+};
+
+/*
+ * Command arguments for PROC4430_open
+ */
+struct proc4430_cmd_args_open {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ u16 proc_id;
+ /* Processor ID addressed by this OMAP4430PROC instance. */
+ void *handle;
+ /* Return parameter: void * to the processor instance */
+};
+
+/*
+ * Command arguments for PROC4430_close
+ */
+struct proc4430_cmd_args_close {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ void *handle;
+ /* void * to the processor instance */
+};
+
+#endif
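
The ioctls above take a per-command argument structure by pointer, with the
common struct proc_mgr_cmd_args embedded at the start; the driver writes the
API status back into that common field before returning. The user-space sketch
below shows the GETCONFIG/SETUP pair; it assumes the device node name and that
user-side copies of these headers are available to the application.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #include "proc4430_drvdefs.h"   /* CMD_PROC4430_* and the argument structs */

    /* Hypothetical user-side module setup through the proc4430 ioctls. */
    static int proc4430_user_setup(int fd)
    {
        struct proc4430_config cfg;
        struct proc4430_cmd_args_get_config get_args = { .cfg = &cfg };
        struct proc4430_cmd_args_setup setup_args = { .cfg = &cfg };

        /* Fetch the default configuration, then hand it back to SETUP. */
        if (ioctl(fd, CMD_PROC4430_GETCONFIG, &get_args) < 0)
            return -1;
        if (ioctl(fd, CMD_PROC4430_SETUP, &setup_args) < 0)
            return -1;

        /* The driver reports the API result in the common args. */
        return setup_args.command_args.api_status;
    }

    /* Usage: proc4430_user_setup(open("/dev/syslink-proc4430", O_RDWR));
     * (device node name is an assumption) */
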
diff --git a/drivers/dsp/syslink/procmgr/procdefs.h b/drivers/dsp/syslink/procmgr/procdefs.h
new file mode 100755
index 000000000000..eb73626d27e1
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/procdefs.h
@@ -0,0 +1,203 @@
+/*
+ * procdefs.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef SYSLINK_PROCDEFS_H
+#define SYSLINK_PROCDEFS_H
+
+#include <linux/types.h>
+
+/* Module level headers */
+#include <procmgr.h>
+
+
+/* =============================
+ * Macros and types
+ * =============================
+ */
+/*
+ * Enumerates the types of Endianism of slave processor.
+ */
+enum processor_endian{
+ PROCESSOR_ENDIAN_DEFAULT = 0,
+ /* Default endianism (no conversion required) */
+ PROCESSOR_ENDIAN_BIG = 1,
+ /* Big endian */
+ PROCESSOR_ENDIAN_LITTLE = 2,
+ /* Little endian */
+ PROCESSOR_ENDIAN_ENDVALUE = 3
+ /* End delimiter indicating start of invalid values for this enum */
+};
+
+
+/*
+ * Configuration parameters for attaching to the slave Processor
+ */
+struct processor_attach_params {
+ struct proc_mgr_attach_params *params;
+ /* Common attach parameters for ProcMgr */
+ u16 num_mem_entries;
+ /* Number of valid memory entries */
+ struct proc_mgr_addr_info mem_entries[PROCMGR_MAX_MEMORY_REGIONS];
+ /* Configuration of memory regions */
+};
+
+/*
+ *Configuration parameters for starting the slave Processor
+ */
+struct processor_start_params {
+ struct proc_mgr_start_params *params;
+ /* Common start parameters for ProcMgr */
+};
+
+/*
+ *Configuration parameters for stopping the slave Processor
+ */
+struct processor_stop_params {
+ struct proc_mgr_stop_params *params;
+ /* Common start parameters for ProcMgr */
+};
+/*
+ * Function pointer type for the function to attach to the processor.
+ */
+typedef int (*processor_attach_fxn) (void *handle,
+ struct processor_attach_params *params);
+
+/*
+ * Function pointer type for the function to detach from the
+ * processor.
+ */
+typedef int (*processor_detach_fxn) (void *handle);
+
+/*
+ * Function pointer type for the function to start the processor.
+ */
+typedef int (*processor_start_fxn) (void *handle, u32 entry_pt,
+ struct processor_start_params *params);
+
+/*
+ *Function pointer type for the function to stop the processor.
+ */
+typedef int (*processor_stop_fxn) (void *handle,
+ struct processor_stop_params *params);
+
+/*
+ * Function pointer type for the function to read from the slave
+ * processor's memory.
+ */
+typedef int (*processor_read_fxn) (void *handle, u32 proc_addr,
+ u32 *num_bytes, void *buffer);
+
+/*
+ *Function pointer type for the function to write into the slave
+ *processor's memory.
+ */
+typedef int (*processor_write_fxn) (void *handle, u32 proc_addr,
+ u32 *num_bytes, void *buffer);
+
+/*
+ *Function pointer type for the function to perform device-dependent
+ * operations.
+ */
+typedef int (*processor_control_fxn) (void *handle, int cmd, void *arg);
+
+/*
+ *Function pointer type for the function to translate between
+ * two types of address spaces.
+ */
+typedef int (*processor_translate_addr_fxn) (void *handle, void **dst_addr,
+ enum proc_mgr_addr_type dstAddrType, void *srcAddr,
+ enum proc_mgr_addr_type srcAddrType);
+
+/*
+ *Function pointer type for the function to map address to slave
+ * address space
+ */
+typedef int (*processor_map_fxn) (void *handle, u32 proc_addr, u32 size,
+ u32 *mapped_addr, u32 *mapped_size, u32 map_attribs);
+
+/*
+ *Function pointer type for the function to map address to slave
+ * address space
+ */
+typedef int (*processor_unmap_fxn) (void *handle, u32 mapped_addr);
+
+/*
+ *Function pointer type for the function that returns proc info
+ */
+typedef int (*processor_proc_info) (void *handle,
+ struct proc_mgr_proc_info *proc_info);
+
+/*
+ *Function pointer type for the function that returns proc info
+ */
+typedef int (*processor_virt_to_phys_fxn) (void *handle, u32 da,
+ u32 *mapped_entries, u32 num_of_entries);
+
+
+/* =============================
+ * Function table interface
+ * =============================
+ */
+/*
+ *Function table interface for Processor.
+ */
+struct processor_fxn_table {
+ processor_attach_fxn attach;
+ /* Function to attach to the slave processor */
+ processor_detach_fxn detach;
+ /* Function to detach from the slave processor */
+ processor_start_fxn start;
+ /* Function to start the slave processor */
+ processor_stop_fxn stop;
+ /* Function to stop the slave processor */
+ processor_read_fxn read;
+ /* Function to read from the slave processor's memory */
+ processor_write_fxn write;
+ /* Function to write into the slave processor's memory */
+ processor_control_fxn control;
+ /* Function to perform device-dependent control function */
+ processor_translate_addr_fxn translateAddr;
+ /* Function to translate between address ranges */
+ processor_map_fxn map;
+ /* Function to map slave addresses to master address space */
+ processor_unmap_fxn unmap;
+ /* Function to unmap slave addresses to master address space */
+ processor_proc_info procinfo;
+ /* Function to convert Virtual to Physical pages */
+ processor_virt_to_phys_fxn virt_to_phys;
+};
+
+/* =============================
+ * Processor structure
+ * =============================
+ */
+/*
+ * Generic Processor object. This object defines the handle type for all
+ * Processor operations.
+ */
+struct processor_object {
+ struct processor_fxn_table proc_fxn_table;
+ /* interface function table to plug into the generic Processor. */
+ enum proc_mgr_state state;
+ /* State of the slave processor */
+ enum proc_mgr_boot_mode boot_mode;
+ /* Boot mode for the slave processor. */
+ void *object;
+ /* Pointer to Processor-specific object. */
+ u16 proc_id;
+ /* Processor ID addressed by this Processor instance. */
+};
+#endif
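
procdefs.h is the plug-in contract for the generic layer: a concrete
implementation fills struct processor_fxn_table inside its processor_object so
that the processor_*() wrappers can dispatch to it. The sketch below shows how
the OMAP4430 entry points from this patch line up with that table; the actual
wiring would happen wherever the processor_object is built (for example in
proc4430_create(), which is not shown in this hunk), and the include path is
an assumption.

    #include "procdefs.h"
    #include "proc4430/proc4430.h"  /* assumed include path */

    /* Illustrative wiring of the OMAP4430 implementation into the table. */
    static void proc4430_fill_fxn_table(struct processor_object *proc_obj)
    {
        proc_obj->proc_fxn_table.attach        = &proc4430_attach;
        proc_obj->proc_fxn_table.detach        = &proc4430_detach;
        proc_obj->proc_fxn_table.start         = &proc4430_start;
        proc_obj->proc_fxn_table.stop          = &proc4430_stop;
        proc_obj->proc_fxn_table.read          = &proc4430_read;
        proc_obj->proc_fxn_table.write         = &proc4430_write;
        proc_obj->proc_fxn_table.control       = &proc4430_control;
        proc_obj->proc_fxn_table.translateAddr = &proc4430_translate_addr;
        proc_obj->proc_fxn_table.map           = &proc4430_map;
        proc_obj->proc_fxn_table.unmap         = &proc4430_unmap;
        proc_obj->proc_fxn_table.procinfo      = &proc4430_proc_info;
        proc_obj->proc_fxn_table.virt_to_phys  = &proc4430_virt_to_phys;
    }
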
diff --git a/drivers/dsp/syslink/procmgr/processor.c b/drivers/dsp/syslink/procmgr/processor.c
new file mode 100755
index 000000000000..4548d12ad967
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/processor.c
@@ -0,0 +1,398 @@
+/*
+ * processor.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+/* Module level headers */
+#include "procdefs.h"
+#include "processor.h"
+
+
+
+/* =========================================
+ * Functions called by ProcMgr
+ * =========================================
+ */
+/*
+ * Function to attach to the Processor.
+ *
+ * This function calls into the specific Processor implementation
+ * to attach to it.
+ * This function is called from the ProcMgr attach function, and
+ * hence is used to perform any activities that may be required
+ * once the slave is powered up.
+ * Depending on the type of Processor, this function may or may not
+ * perform any activities.
+ */
+inline int processor_attach(void *handle,
+ struct processor_attach_params *params)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(params == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.attach == NULL);
+
+ proc_handle->boot_mode = params->params->boot_mode;
+ retval = proc_handle->proc_fxn_table.attach(handle, params);
+
+ if (proc_handle->boot_mode == PROC_MGR_BOOTMODE_BOOT)
+ proc_handle->state = PROC_MGR_STATE_POWERED;
+ else if (proc_handle->boot_mode == PROC_MGR_BOOTMODE_NOLOAD)
+ proc_handle->state = PROC_MGR_STATE_LOADED;
+ else if (proc_handle->boot_mode == PROC_MGR_BOOTMODE_NOBOOT)
+ proc_handle->state = PROC_MGR_STATE_RUNNNING;
+ return retval;
+}
+
+
+/*
+ * Function to detach from the Processor.
+ *
+ * This function calls into the specific Processor implementation
+ * to detach from it.
+ * This function is called from the ProcMgr detach function, and
+ * hence is useful to perform any activities that may be required
+ * before the slave is powered down.
+ * Depending on the type of Processor, this function may or may not
+ * perform any activities.
+ */
+inline int processor_detach(void *handle)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.detach == NULL);
+
+ retval = proc_handle->proc_fxn_table.detach(handle);
+ /* For all boot modes, at the end of detach, the Processor is in
+ * unknown state.
+ */
+ proc_handle->state = PROC_MGR_STATE_UNKNOWN;
+ return retval;
+}
+
+
+/*
+ * Function to start the processor.
+ *
+ * This function calls into the specific Processor implementation
+ * to start the slave processor running.
+ * This function starts the slave processor running, in most
+ * devices, by programming its entry point into the boot location
+ * of the slave processor and releasing it from reset.
+ * The handle specifies the specific Processor instance to be used.
+ *
+ * @param handle void * to the Processor object
+ * @param entryPt Entry point of the file loaded on the slave Processor
+ *
+ * @sa Processor_stop
+ */
+inline int processor_start(void *handle, u32 entry_pt,
+ struct processor_start_params *params)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ /* entryPt may be 0 for some devices. Cannot check for valid/invalid. */
+ BUG_ON(params == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.start == NULL);
+ retval = proc_handle->proc_fxn_table.start(handle, entry_pt, params);
+
+ if ((proc_handle->boot_mode == PROC_MGR_BOOTMODE_BOOT)
+ || (proc_handle->boot_mode == PROC_MGR_BOOTMODE_NOLOAD))
+ proc_handle->state = PROC_MGR_STATE_RUNNNING;
+
+ return retval;
+}
+
+
+/*
+ * Function to stop the processor.
+ *
+ * This function calls into the specific Processor implementation
+ * to stop the slave processor.
+ * This function stops the slave processor running, in most
+ * devices, by placing it in reset.
+ * The handle specifies the specific Processor instance to be used.
+ */
+inline int processor_stop(void *handle,
+ struct processor_stop_params *params)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.stop == NULL);
+
+ retval = proc_handle->proc_fxn_table.stop(handle, params);
+
+ if ((proc_handle->boot_mode == PROC_MGR_BOOTMODE_BOOT)
+ || (proc_handle->boot_mode == PROC_MGR_BOOTMODE_NOLOAD))
+ proc_handle->state = PROC_MGR_STATE_RESET;
+
+ return retval;
+}
+
+
+/*
+ * Function to read from the slave processor's memory.
+ *
+ * This function calls into the specific Processor implementation
+ * to read from the slave processor's memory. It reads from the
+ * specified address in the processor's address space and copies
+ * the required number of bytes into the specified buffer.
+ * It returns the number of bytes actually read in the num_bytes
+ * parameter.
+ * Depending on the processor implementation, it may result in
+ * reading from shared memory or across a peripheral physical
+ * connectivity.
+ * The handle specifies the specific Processor instance to be used.
+ */
+inline int processor_read(void *handle, u32 proc_addr,
+ u32 *num_bytes, void *buffer)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(num_bytes == 0);
+ BUG_ON(buffer == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.read == NULL);
+
+ retval = proc_handle->proc_fxn_table.read(handle, proc_addr,
+ num_bytes, buffer);
+ return retval;
+}
+
+
+/*
+ * Function to write into the slave processor's memory.
+ *
+ * This function calls into the specific Processor implementation
+ * to write into the slave processor's memory. It writes into the
+ * specified address in the processor's address space and copies
+ * the required number of bytes from the specified buffer.
+ * It returns the number of bytes actually written in the num_bytes
+ * parameter.
+ * Depending on the processor implementation, it may result in
+ * writing into shared memory or across a peripheral physical
+ * connectivity.
+ * The handle specifies the specific Processor instance to be used.
+ */
+inline int processor_write(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(num_bytes == 0);
+ BUG_ON(buffer == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.write == NULL);
+
+ retval = proc_handle->proc_fxn_table.write(handle, proc_addr,
+ num_bytes, buffer);
+ return retval;
+}
+
+
+/*
+ * Function to get the current state of the slave Processor.
+ *
+ * This function gets the state of the slave processor as
+ * maintained on the master Processor state machine. It does not
+ * go to the slave processor to get its actual state at the time
+ * when this API is called.
+ */
+enum proc_mgr_state processor_get_state(void *handle)
+{
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+
+ return proc_handle->state;
+}
+
+
+/*
+ * Function to set the current state of the slave Processor
+ * to specified value.
+ *
+ * This function is used to set the state of the processor to the
+ * value as specified. This function may be used by external
+ * entities that affect the state of the slave processor, such as
+ * PwrMgr, error handler, or ProcMgr.
+ */
+void processor_set_state(void *handle, enum proc_mgr_state state)
+{
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ proc_handle->state = state;
+}
+
+
+/*
+ * Function to perform device-dependent operations.
+ *
+ * This function calls into the specific Processor implementation
+ * to perform device dependent control operations. The control
+ * operations supported by the device are exposed directly by the
+ * specific implementation of the Processor interface. These
+ * commands and their specific argument types are used with this
+ * function.
+ */
+inline int processor_control(void *handle, int cmd, void *arg)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.control == NULL);
+
+ retval = proc_handle->proc_fxn_table.control(handle, cmd, arg);
+ return retval;
+}
+
+
+/*
+ * Function to translate between two types of address spaces.
+ *
+ * This function translates addresses between two types of address
+ * spaces. The destination and source address types are indicated
+ * through parameters specified in this function.
+ */
+inline int processor_translate_addr(void *handle, void **dst_addr,
+ enum proc_mgr_addr_type dst_addr_type, void *src_addr,
+ enum proc_mgr_addr_type src_addr_type)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(dst_addr == NULL);
+ BUG_ON(src_addr == NULL);
+ BUG_ON(dst_addr_type >= PROC_MGR_ADDRTYPE_ENDVALUE);
+ BUG_ON(src_addr_type >= PROC_MGR_ADDRTYPE_ENDVALUE);
+ BUG_ON(proc_handle->proc_fxn_table.translateAddr == NULL);
+
+ retval = proc_handle->proc_fxn_table.translateAddr(handle,
+ dst_addr, dst_addr_type, src_addr, src_addr_type);
+ return retval;
+}
+
+
+/*
+ * Function to map address to slave address space.
+ *
+ * This function maps the provided slave address to a host address
+ * and returns the mapped address and size.
+ */
+inline int processor_map(void *handle, u32 proc_addr, u32 size,
+ u32 *mapped_addr, u32 *mapped_size, u32 map_attribs)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(size == 0);
+ BUG_ON(mapped_addr == NULL);
+ BUG_ON(mapped_size == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.map == NULL);
+
+ retval = proc_handle->proc_fxn_table.map(handle, proc_addr,
+ size, mapped_addr, mapped_size, map_attribs);
+ return retval;
+}
+
+/*
+ * Function to unmap address to slave address space.
+ *
+ * This function unmaps the provided slave address.
+ */
+inline int processor_unmap(void *handle, u32 mapped_addr)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ retval = proc_handle->proc_fxn_table.unmap(handle, mapped_addr);
+ return retval;
+}
+
+/*
+ * Function that registers for notification when the slave
+ * processor transitions to any of the states specified.
+ *
+ * This function allows the user application to register for
+ * changes in processor state and take actions accordingly.
+
+ */
+inline int processor_register_notify(void *handle, proc_mgr_callback_fxn fxn,
+ void *args, enum proc_mgr_state state[])
+{
+ int retval = 0;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(fxn == NULL);
+
+ /* TODO: TBD: To be implemented. */
+ return retval;
+}
+
+/*
+ * Function that returns the proc instance mem info
+ */
+int processor_get_proc_info(void *handle, struct proc_mgr_proc_info *procinfo)
+{
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+ int retval;
+ retval = proc_handle->proc_fxn_table.procinfo(proc_handle, procinfo);
+ return retval;
+}
+
+/*
+ * Function that returns the address translations
+ */
+int processor_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries)
+{
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+ int retval;
+ retval = proc_handle->proc_fxn_table.virt_to_phys(handle, da,
+ mapped_entries, num_of_entries);
+ return retval;
+}
diff --git a/drivers/dsp/syslink/procmgr/processor.h b/drivers/dsp/syslink/procmgr/processor.h
new file mode 100755
index 000000000000..b4f78581839e
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/processor.h
@@ -0,0 +1,84 @@
+/*
+ * processor.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef SYSLINK_PROCESSOR_H_
+#define SYSLINK_PROCESSOR_H_
+
+#include <linux/types.h>
+
+/* Module level headers */
+#include "procdefs.h"
+
+/* ===================================
+ * APIs
+ * ===================================
+ */
+/* Function to attach to the Processor. */
+int processor_attach(void *handle, struct processor_attach_params *params);
+
+/* Function to detach from the Processor. */
+int processor_detach(void *handle);
+
+/* Function to start the processor. */
+int processor_start(void *handle, u32 entry_pt,
+ struct processor_start_params *params);
+
+/* Function to stop the processor. */
+int processor_stop(void *handle,
+ struct processor_stop_params *params);
+
+/* Function to read from the slave processor's memory. */
+int processor_read(void *handle, u32 proc_addr, u32 *num_bytes, void *buffer);
+
+/* Function to write into the slave processor's memory. */
+int processor_write(void *handle, u32 proc_addr, u32 *num_bytes, void *buffer);
+
+/* Function to get the current state of the slave Processor as maintained on
+ * the master Processor state machine.
+ */
+enum proc_mgr_state processor_get_state(void *handle);
+
+/* Function to set the current state of the slave Processor to specified value.
+ */
+void processor_set_state(void *handle, enum proc_mgr_state state);
+
+/* Function to perform device-dependent operations. */
+int processor_control(void *handle, int cmd, void *arg);
+
+/* Function to translate between two types of address spaces. */
+int processor_translate_addr(void *handle, void **dst_addr,
+ enum proc_mgr_addr_type dst_addr_type, void *src_addr,
+ enum proc_mgr_addr_type src_addr_type);
+
+/* Function to map address to slave address space */
+int processor_map(void *handle, u32 proc_addr, u32 size, u32 *mapped_addr,
+ u32 *mapped_size, u32 map_attribs);
+/* Function to unmap address to slave address space */
+int processor_unmap(void *handle, u32 mapped_addr);
+
+/* Function that registers for notification when the slave processor
+ * transitions to any of the states specified.
+ */
+int processor_register_notify(void *handle, proc_mgr_callback_fxn fxn,
+ void *args, enum proc_mgr_state state[]);
+
+/* Function that returns the proc instance mem info
+ */
+int processor_get_proc_info(void *handle, struct proc_mgr_proc_info *procinfo);
+
+int processor_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries);
+#endif
diff --git a/drivers/dsp/syslink/procmgr/procmgr.c b/drivers/dsp/syslink/procmgr/procmgr.c
new file mode 100755
index 000000000000..a7124c5b9028
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/procmgr.c
@@ -0,0 +1,957 @@
+/*
+ * procmgr.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <asm/atomic.h>
+
+/* Module level headers */
+#include <procmgr.h>
+#include "procdefs.h"
+#include "processor.h"
+#include <syslink/atomic_linux.h>
+
+/* ================================
+ * Macros and types
+ * ================================
+ */
+/*! @brief Macro to make a correct module magic number with refCount */
+#define PROCMGR_MAKE_MAGICSTAMP(x) ((PROCMGR_MODULEID << 12u) | (x))
+
+/*
+ * ProcMgr Module state object
+ */
+struct proc_mgr_module_object {
+ atomic_t ref_count;
+ u32 config_size;
+ /* Size of configuration structure */
+ struct proc_mgr_config cfg;
+ /* ProcMgr configuration structure */
+ struct proc_mgr_config def_cfg;
+ /* Default module configuration */
+ struct proc_mgr_params def_inst_params;
+ /* Default parameters for the ProcMgr instances */
+ struct proc_mgr_attach_params def_attach_params;
+ /* Default parameters for the ProcMgr attach function */
+ struct proc_mgr_start_params def_start_params;
+ /* Default parameters for the ProcMgr start function */
+ struct proc_mgr_stop_params def_stop_params;
+ /* Default parameters for the ProcMgr stop function */
+ struct mutex *gate_handle;
+ /* handle of gate to be used for local thread safety */
+ void *proc_handles[MULTIPROC_MAXPROCESSORS];
+ /* Array of handles of ProcMgr instances */
+};
+
+/*
+ * ProcMgr instance object
+ */
+struct proc_mgr_object {
+ u16 proc_id;
+ /* Processor ID associated with this ProcMgr. */
+ struct processor_object *proc_handle;
+	/* Handle to the Processor object associated with this ProcMgr. */
+	void *loader_handle;
+	/*!< Handle to the Loader object associated with this ProcMgr. */
+	void *pwr_handle;
+	/*!< Handle to the PwrMgr object associated with this ProcMgr. */
+ struct proc_mgr_params params;
+ /* ProcMgr instance params structure */
+ struct proc_mgr_attach_params attach_params;
+ /* ProcMgr attach params structure */
+ struct proc_mgr_start_params start_params;
+ /* ProcMgr start params structure */
+ struct proc_mgr_stop_params stop_params;
+	/* ProcMgr stop params structure */
+ u32 file_id;
+ /*!< File ID of the loaded static executable */
+ u16 num_mem_entries;
+ /* Number of valid memory entries */
+ struct proc_mgr_addr_info mem_entries[PROCMGR_MAX_MEMORY_REGIONS];
+ /* Configuration of memory regions */
+};
+
+struct proc_mgr_module_object proc_mgr_obj_state = {
+ .config_size = sizeof(struct proc_mgr_config),
+ .def_cfg.gate_handle = NULL,
+ .gate_handle = NULL,
+ .def_inst_params.proc_handle = NULL,
+ .def_attach_params.boot_mode = PROC_MGR_BOOTMODE_BOOT,
+ .def_start_params.proc_id = 0
+};
+
+
+/*======================================
+ * Function to get the default configuration for the ProcMgr
+ * module.
+ *
+ * This function can be called by the application to have its
+ * configuration parameter for proc_mgr_setup filled in by the
+ * ProcMgr module with the default parameters. If the user does
+ * not wish to make any change in the default parameters, this API
+ * is not required to be called.
+ */
+void proc_mgr_get_config(struct proc_mgr_config *cfg)
+{
+ BUG_ON(cfg == NULL);
+ memcpy(cfg, &proc_mgr_obj_state.def_cfg,
+ sizeof(struct proc_mgr_config));
+ return;
+}
+EXPORT_SYMBOL(proc_mgr_get_config);
+
+/*
+ * Function to setup the ProcMgr module.
+ *
+ * This function sets up the ProcMgr module. This function must
+ * be called before any other instance-level APIs can be invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then proc_mgr_get_config can be called to get the
+ * configuration filled with the default values. After this, only
+ * the required configuration values can be changed. If the user
+ * does not wish to make any change in the default parameters, the
+ * application can simply call proc_mgr_setup with a NULL parameter;
+ * the default parameters are then used automatically.
+ */
+int proc_mgr_setup(struct proc_mgr_config *cfg)
+{
+ int retval = 0;
+ struct proc_mgr_config tmp_cfg;
+
+	/* If the refCount variable is not initialized, the upper 16 bits
+	 * are written with the module ID to ensure correctness of the
+	 * refCount variable.
+	 */
+ atomic_cmpmask_and_set(&proc_mgr_obj_state.ref_count,
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&proc_mgr_obj_state.ref_count)
+ != PROCMGR_MAKE_MAGICSTAMP(1u))
+ return 0;
+ if (cfg == NULL) {
+ proc_mgr_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+ if (cfg->gate_handle != NULL) {
+ proc_mgr_obj_state.gate_handle = cfg->gate_handle;
+	} else {
+		/* User has not provided any gate handle, so create a
+		 * default handle.
+		 */
+		proc_mgr_obj_state.gate_handle = kmalloc(sizeof(struct mutex),
+					GFP_KERNEL);
+		if (proc_mgr_obj_state.gate_handle == NULL) {
+			/* Roll back the refCount on allocation failure. */
+			atomic_set(&proc_mgr_obj_state.ref_count,
+				PROCMGR_MAKE_MAGICSTAMP(0));
+			return -ENOMEM;
+		}
+		mutex_init(proc_mgr_obj_state.gate_handle);
+	}
+ memcpy(&proc_mgr_obj_state.cfg, cfg, sizeof(struct proc_mgr_config));
+ /* Initialize the procHandles array. */
+ memset(&proc_mgr_obj_state.proc_handles, 0,
+ (sizeof(void *) * MULTIPROC_MAXPROCESSORS));
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_setup);
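+
+/*
+ * Example (illustrative sketch): a typical module bring-up pairs
+ * proc_mgr_get_config() with proc_mgr_setup(), for instance:
+ *
+ *	struct proc_mgr_config cfg;
+ *	int status;
+ *
+ *	proc_mgr_get_config(&cfg);
+ *	status = proc_mgr_setup(&cfg);
+ *	...
+ *	status = proc_mgr_destroy();
+ *
+ * Passing NULL to proc_mgr_setup() selects the default configuration.
+ */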
+
+/*==================================
+ * Function to destroy the ProcMgr module.
+ *
+ * Once this function is called, other ProcMgr module APIs, except
+ * for the proc_mgr_get_config API, cannot be called anymore.
+ */
+int proc_mgr_destroy(void)
+{
+ int retval = 0;
+ int i;
+
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_destroy: Error - module not initialized\n");
+ return -EFAULT;
+ }
+ if (atomic_dec_return(&proc_mgr_obj_state.ref_count)
+ == PROCMGR_MAKE_MAGICSTAMP(0)) {
+
+		/* If any ProcMgr instances have not been deleted so far,
+		 * delete them now.
+		 */
+ for (i = 0 ; i < MULTIPROC_MAXPROCESSORS; i++) {
+ if (proc_mgr_obj_state.proc_handles[i] != NULL)
+ proc_mgr_delete
+ (&(proc_mgr_obj_state.proc_handles[i]));
+ }
+
+ mutex_destroy(proc_mgr_obj_state.gate_handle);
+ kfree(proc_mgr_obj_state.gate_handle);
+		/* Reset the refCount */
+ atomic_set(&proc_mgr_obj_state.ref_count,
+ PROCMGR_MAKE_MAGICSTAMP(0));
+ }
+	return retval;
+}
+EXPORT_SYMBOL(proc_mgr_destroy);
+
+/*=====================================
+ * Function to initialize the parameters for the ProcMgr instance.
+ *
+ * This function can be called by the application to have its
+ * configuration parameter for proc_mgr_create filled in by the
+ * ProcMgr module with the default parameters.
+ */
+void proc_mgr_params_init(void *handle, struct proc_mgr_params *params)
+{
+ struct proc_mgr_object *proc_handle = (struct proc_mgr_object *)handle;
+
+ if (WARN_ON(params == NULL))
+ goto exit;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_params_init: Error - module not initialized\n");
+ }
+ if (handle == NULL) {
+ memcpy(params, &(proc_mgr_obj_state.def_inst_params),
+ sizeof(struct proc_mgr_params));
+ } else {
+ /* Return updated ProcMgr instance specific parameters. */
+ memcpy(params, &(proc_handle->params),
+ sizeof(struct proc_mgr_params));
+ }
+exit:
+ return;
+}
+EXPORT_SYMBOL(proc_mgr_params_init);
+
+/*=====================================
+ * Function to create a ProcMgr object for a specific slave
+ * processor.
+ *
+ * This function creates an instance of the ProcMgr module and
+ * returns an instance handle, which is used to access the
+ * specified slave processor. The processor ID specified here is
+ * the ID of the slave processor as configured with the MultiProc
+ * module.
+ * Instance-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then proc_mgr_params_init can be called to get the
+ * configuration filled with the default values. After this, only
+ * the required configuration values can be changed. For this
+ * API, the params argument is not optional, since the user needs
+ * to provide some essential values such as loader, PwrMgr and
+ * Processor instances to be used with this ProcMgr instance.
+ */
+void *proc_mgr_create(u16 proc_id, const struct proc_mgr_params *params)
+{
+ struct proc_mgr_object *handle = NULL;
+
+ BUG_ON(!IS_VALID_PROCID(proc_id));
+ BUG_ON(params == NULL);
+ BUG_ON(params->proc_handle == NULL);
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_create: Error - module not initialized\n");
+ return NULL;
+ }
+ if (proc_mgr_obj_state.proc_handles[proc_id] != NULL) {
+ handle = proc_mgr_obj_state.proc_handles[proc_id];
+		printk(KERN_WARNING "proc_mgr_create: Processor already exists"
+			" for proc_id %d, handle = 0x%x\n", proc_id, (u32)handle);
+ return handle;
+ }
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+	handle = (struct proc_mgr_object *)
+			vmalloc(sizeof(struct proc_mgr_object));
+	if (handle == NULL) {
+		mutex_unlock(proc_mgr_obj_state.gate_handle);
+		return NULL;
+	}
+	memset(handle, 0, sizeof(struct proc_mgr_object));
+ memcpy(&(handle->params), params, sizeof(struct proc_mgr_params));
+ handle->proc_id = proc_id;
+ handle->proc_handle = params->proc_handle;
+ handle->loader_handle = params->loader_handle;
+ handle->pwr_handle = params->pwr_handle;
+ proc_mgr_obj_state.proc_handles[proc_id] = handle;
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return handle;
+}
+EXPORT_SYMBOL(proc_mgr_create);
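+
+/*
+ * Example (illustrative sketch; the handle and proc_id names below are
+ * hypothetical placeholders): the caller is expected to fill in the
+ * mandatory handles before calling create, e.g.:
+ *
+ *	struct proc_mgr_params params;
+ *	void *handle;
+ *
+ *	proc_mgr_params_init(NULL, &params);
+ *	params.proc_handle = my_proc_handle;
+ *	params.loader_handle = my_loader_handle;
+ *	params.pwr_handle = my_pwr_handle;
+ *	handle = proc_mgr_create(remote_proc_id, &params);
+ */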
+
+/*===================================
+ * Function to delete a ProcMgr object for a specific slave
+ * processor.
+ *
+ * Once this function is called, other ProcMgr instance level APIs
+ * that require the instance handle cannot be called.
+ *
+ */
+int
+proc_mgr_delete(void **handle_ptr)
+{
+ int retval = 0;
+ struct proc_mgr_object *handle;
+
+ BUG_ON(handle_ptr == NULL);
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_delete: Error - module not initialized\n");
+ return -EFAULT;
+ }
+
+ handle = (struct proc_mgr_object *)(*handle_ptr);
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ proc_mgr_obj_state.proc_handles[handle->proc_id] = NULL;
+ vfree(handle);
+ *handle_ptr = NULL;
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_delete);
+
+/*======================================
+ * Function to open a handle to an existing ProcMgr object handling
+ * the proc_id.
+ *
+ * This function returns a handle to an existing ProcMgr instance
+ * created for this proc_id. It enables other entities to access
+ * and use this ProcMgr instance.
+ */
+int proc_mgr_open(void **handle_ptr, u16 proc_id)
+{
+ int retval = 0;
+
+ BUG_ON(handle_ptr == NULL);
+ BUG_ON(!IS_VALID_PROCID(proc_id));
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_open: Error - module not initialized\n");
+ return -EFAULT;
+ }
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ *handle_ptr = proc_mgr_obj_state.proc_handles[proc_id];
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_open);
+
+/*=====================================
+ * Function to close this handle to the ProcMgr instance.
+ *
+ * This function closes the handle to the ProcMgr instance
+ * obtained through proc_mgr_open call made earlier.
+ */
+int proc_mgr_close(void *handle)
+{
+ int retval = 0;
+
+ BUG_ON(handle == NULL);
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_close: Error - module not initialized\n");
+ return -EFAULT;
+ }
+ /* Nothing to be done for closing the handle. */
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_close);
+
+/*========================================
+ * Function to initialize the parameters for the ProcMgr attach
+ * function.
+ *
+ * This function can be called by the application to have its
+ * configuration parameter for proc_mgr_attach filled in by the
+ * ProcMgr module with the default parameters. If the user does
+ * not wish to make any change in the default parameters, this API
+ * is not required to be called.
+ */
+void proc_mgr_get_attach_params(void *handle,
+ struct proc_mgr_attach_params *params)
+{
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ BUG_ON(params == NULL);
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_get_attach_params:"
+ "Error - module not initialized\n");
+ }
+ if (handle == NULL) {
+ memcpy(params, &(proc_mgr_obj_state.def_attach_params),
+ sizeof(struct proc_mgr_attach_params));
+ } else {
+ /* Return updated ProcMgr instance specific parameters. */
+ memcpy(params, &(proc_mgr_handle->attach_params),
+ sizeof(struct proc_mgr_attach_params));
+ }
+ return;
+}
+EXPORT_SYMBOL(proc_mgr_get_attach_params);
+
+/*
+ * Function to attach the client to the specified slave and also
+ * initialize the slave (if required).
+ *
+ * This function attaches to an instance of the ProcMgr module and
+ * performs any hardware initialization required to power up the
+ * slave device. This function also performs the required state
+ * transitions for this ProcMgr instance to ensure that the local
+ * object representing the slave device correctly indicates the
+ * state of the slave device. Depending on the slave boot mode
+ * being used, the slave may be powered up, in reset, or even
+ * running state.
+ * Configuration parameters need to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then proc_mgr_get_attach_params can be called to get
+ * the configuration filled with the default values. After this,
+ * only the required configuration values can be changed. If the
+ * user does not wish to make any change in the default parameters,
+ * the application can simply call proc_mgr_attach with NULL
+ * parameters.
+ * The default parameters would get automatically used.
+ */
+int proc_mgr_attach(void *handle, struct proc_mgr_attach_params *params)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ struct proc_mgr_attach_params tmp_params;
+ struct processor_attach_params proc_attach_params;
+
+ if (params == NULL) {
+ proc_mgr_get_attach_params(handle, &tmp_params);
+ params = &tmp_params;
+ }
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_attach:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ if (WARN_ON(handle == NULL)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+ if (WARN_ON(params->boot_mode == PROC_MGR_BOOTMODE_ENDVALUE)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ /* Copy the user provided values into the instance object. */
+ memcpy(&(proc_mgr_handle->attach_params), params,
+ sizeof(struct proc_mgr_attach_params));
+ proc_attach_params.params = params;
+ proc_attach_params.num_mem_entries = 0;
+ /* Attach to the specified Processor instance. */
+ retval = processor_attach(proc_mgr_handle->proc_handle,
+ &proc_attach_params);
+ proc_mgr_handle->num_mem_entries = proc_attach_params.num_mem_entries;
+ printk(KERN_INFO "proc_mgr_attach:proc_mgr_handle->num_mem_entries = %d\n",
+ proc_mgr_handle->num_mem_entries);
+ /* Store memory information in local object.*/
+ memcpy(&(proc_mgr_handle->mem_entries),
+ &(proc_attach_params.mem_entries),
+ sizeof(proc_mgr_handle->mem_entries));
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_attach);
+
+/*===================================
+ * Function to detach the client from the specified slave and also
+ * finalize the slave (if required).
+ *
+ * This function detaches from an instance of the ProcMgr module
+ * and performs any hardware finalization required to power down
+ * the slave device. This function also performs the required state
+ * transitions for this ProcMgr instance to ensure that the local
+ * object representing the slave device correctly indicates the
+ * state of the slave device. Depending on the slave boot mode
+ * being used, the slave may be powered down, in reset, or left in
+ * its original state.
+*/
+int proc_mgr_detach(void *handle)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_detach:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ /* Detach from the Processor. */
+ retval = processor_detach(proc_mgr_handle->proc_handle);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_detach);
+
+/*===============================
+ * Function to initialize the parameters for the ProcMgr start
+ * function.
+ *
+ * This function can be called by the application to have its
+ * configuration parameter for proc_mgr_start filled in by the
+ * ProcMgr module with the default parameters. If the user does
+ * not wish to make any change in the default parameters, this API
+ * is not required to be called.
+ *
+ */
+void proc_mgr_get_start_params(void *handle,
+ struct proc_mgr_start_params *params)
+{
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_get_start_params:"
+ "Error - module not initialized\n");
+ }
+ BUG_ON(params == NULL);
+
+ if (handle == NULL) {
+ memcpy(params, &(proc_mgr_obj_state.def_start_params),
+ sizeof(struct proc_mgr_start_params));
+ } else {
+ /* Return updated ProcMgr instance specific parameters. */
+ memcpy(params, &(proc_mgr_handle->start_params),
+ sizeof(struct proc_mgr_start_params));
+ }
+ return;
+}
+EXPORT_SYMBOL(proc_mgr_get_start_params);
+
+/*==========================================
+ * Function to start the slave processor running.
+ *
+ * Function to start execution of the loaded code on the slave
+ * from the entry point specified in the slave executable loaded
+ * earlier by call to proc_mgr_load ().
+ * After successful completion of this function, the ProcMgr
+ * instance is expected to be in the proc_mgr_State_Running state.
+ */
+int proc_mgr_start(void *handle, u32 entry_point,
+ struct proc_mgr_start_params *params)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ struct proc_mgr_start_params tmp_params;
+ struct processor_start_params proc_params;
+
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_start:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+
+ if (params == NULL) {
+ proc_mgr_get_start_params(handle, &tmp_params);
+ params = &tmp_params;
+ }
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ memcpy(&(proc_mgr_handle->start_params), params,
+ sizeof(struct proc_mgr_start_params));
+ /* Start the slave processor running. */
+ proc_params.params = params;
+ retval = processor_start(proc_mgr_handle->proc_handle,
+ entry_point, &proc_params);
+
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+	return retval;
+}
+EXPORT_SYMBOL(proc_mgr_start);
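+
+/*
+ * Example (illustrative sketch): attach and start are expected to follow
+ * create; passing NULL selects the default parameters, e.g.:
+ *
+ *	status = proc_mgr_attach(handle, NULL);
+ *	status = proc_mgr_start(handle, entry_point, NULL);
+ *
+ * where entry_point is the entry address of the slave executable loaded
+ * earlier.
+ */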
+
+/*========================================
+ * Function to stop the slave processor.
+ *
+ * Function to stop execution of the slave processor.
+ * Depending on the boot mode, after successful completion of this
+ * function, the ProcMgr instance may be in the proc_mgr_State_Reset
+ * state.
+ *
+ */
+int proc_mgr_stop(void *handle, struct proc_mgr_stop_params *params)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ struct processor_stop_params proc_params;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_stop:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ proc_params.params = params;
+ retval = processor_stop(proc_mgr_handle->proc_handle,
+ &proc_params);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+	return retval;
+}
+EXPORT_SYMBOL(proc_mgr_stop);
+
+/*===================================
+ * Function to get the current state of the slave Processor.
+ *
+ * This function gets the state of the slave processor as
+ * maintained on the master Processor state machine. It does not
+ * go to the slave processor to get its actual state at the time
+ * when this API is called.
+ *
+ */
+enum proc_mgr_state proc_mgr_get_state(void *handle)
+{
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ enum proc_mgr_state state = PROC_MGR_STATE_UNKNOWN;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_get_state:"
+ "Error - module not initialized\n");
+		return state;
+ }
+ BUG_ON(handle == NULL);
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ state = processor_get_state(proc_mgr_handle->proc_handle);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return state;
+}
+EXPORT_SYMBOL(proc_mgr_get_state);
+
+/*==================================================
+ * Function to read from the slave processor's memory.
+ *
+ * This function reads from the specified address in the
+ * processor's address space and copies the required number of
+ * bytes into the specified buffer.
+ * It returns the number of bytes actually read in the num_bytes
+ * parameter.
+ */
+int proc_mgr_read(void *handle, u32 proc_addr, u32 *num_bytes, void *buffer)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_read:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(num_bytes == NULL);
+ BUG_ON(buffer == NULL);
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+
+ retval = processor_read(proc_mgr_handle->proc_handle, proc_addr,
+ num_bytes, buffer);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_read);
+
+/*
+ * Function to write into the slave processor's memory.
+ *
+ * This function writes into the specified address in the
+ * processor's address space and copies the required number of
+ * bytes from the specified buffer.
+ * It returns the number of bytes actually written in the num_bytes
+ * parameter.
+ */
+int proc_mgr_write(void *handle, u32 proc_addr, u32 *num_bytes, void *buffer)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_write:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(proc_addr == 0);
+ BUG_ON(num_bytes == NULL);
+ BUG_ON(buffer == NULL);
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ retval = processor_write(proc_mgr_handle->proc_handle, proc_addr,
+ num_bytes, buffer);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+	return retval;
+}
+EXPORT_SYMBOL(proc_mgr_write);
+
+
+/*===================================
+ * Function to perform device-dependent operations.
+ *
+ * This function performs control operations supported by the slave
+ * processor, as exposed directly by the specific implementation of the
+ * Processor interface. These commands and their specific argument
+ * types are used with this function.
+ */
+int proc_mgr_control(void *handle, int cmd, void *arg)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle
+ = (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_control:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ /* Perform device-dependent control operation. */
+ retval = processor_control(proc_mgr_handle->proc_handle, cmd, arg);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+	return retval;
+}
+EXPORT_SYMBOL(proc_mgr_control);
+
+/*========================================
+ * Function to translate between two types of address spaces.
+ *
+ * This function translates addresses between two types of address
+ * spaces. The destination and source address types are indicated
+ * through parameters specified in this function.
+ */
+int proc_mgr_translate_addr(void *handle, void **dst_addr,
+ enum proc_mgr_addr_type dst_addr_type, void *src_addr,
+ enum proc_mgr_addr_type src_addr_type)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_translate_addr:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(dst_addr == NULL);
+ BUG_ON(handle == NULL);
+ BUG_ON(dst_addr_type > PROC_MGR_ADDRTYPE_ENDVALUE);
+ BUG_ON(src_addr == NULL);
+ BUG_ON(src_addr_type > PROC_MGR_ADDRTYPE_ENDVALUE);
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ /* Translate the address. */
+ retval = processor_translate_addr(proc_mgr_handle->proc_handle,
+ dst_addr, dst_addr_type, src_addr, src_addr_type);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+	return retval;
+}
+EXPORT_SYMBOL(proc_mgr_translate_addr);
+
+/*============================================
+ * Function to map address to slave address space.
+ *
+ * This function maps the provided slave address to a host address
+ * and returns the mapped address and size.
+ *
+ */
+int proc_mgr_map(void *handle, u32 proc_addr, u32 size, u32 *mapped_addr,
+ u32 *mapped_size, u32 map_attribs)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_map:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(mapped_addr == NULL);
+ BUG_ON(mapped_size == NULL);
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+
+ /* Map to host address space. */
+ retval = processor_map(proc_mgr_handle->proc_handle, proc_addr,
+ size, mapped_addr, mapped_size, map_attribs);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+	return retval;
+}
+EXPORT_SYMBOL(proc_mgr_map);
+
+/*============================================
+ * Function to unmap an address from the slave address space.
+ *
+ * This function unmaps the address previously mapped through proc_mgr_map.
+ *
+ */
+int proc_mgr_unmap(void *handle, u32 mapped_addr)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_unmap:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+
+	/* Unmap from host address space. */
+ retval = processor_unmap(proc_mgr_handle->proc_handle, mapped_addr);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+	return retval;
+}
+EXPORT_SYMBOL(proc_mgr_unmap);
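+
+/*
+ * Example (illustrative sketch): map and unmap are expected to be used as
+ * a pair, e.g.:
+ *
+ *	u32 mapped_addr, mapped_size;
+ *
+ *	status = proc_mgr_map(handle, proc_addr, size, &mapped_addr,
+ *				&mapped_size, map_attribs);
+ *	...
+ *	status = proc_mgr_unmap(handle, mapped_addr);
+ */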
+
+/*=================================
+ * Function that registers for notification when the slave
+ * processor transitions to any of the states specified.
+ *
+ * This function allows the user application to register for
+ * changes in processor state and take actions accordingly.
+ *
+ */
+int proc_mgr_register_notify(void *handle, proc_mgr_callback_fxn fxn,
+ void *args, enum proc_mgr_state state[])
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle
+ = (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_register_notify:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ BUG_ON(fxn == NULL);
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ retval = processor_register_notify(proc_mgr_handle->proc_handle, fxn,
+ args, state);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_register_notify);
+
+/*
+ * Function that returns information about the characteristics of
+ * the slave processor.
+ */
+int proc_mgr_get_proc_info(void *handle, struct proc_mgr_proc_info *proc_info)
+{
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ struct processor_object *proc_handle;
+
+ struct proc_mgr_proc_info proc_info_test;
+
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_get_proc_info:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ if (WARN_ON(handle == NULL))
+ goto error_exit;
+ if (WARN_ON(proc_info == NULL))
+ goto error_exit;
+ proc_handle = proc_mgr_handle->proc_handle;
+ if (WARN_ON(proc_handle == NULL))
+ goto error_exit;
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+
+ processor_get_proc_info(proc_handle, &proc_info_test);
+ /* Return bootMode information. */
+ proc_info->boot_mode = proc_mgr_handle->attach_params.boot_mode;
+ /* Return memory information. */
+ proc_info->num_mem_entries = proc_mgr_handle->num_mem_entries;
+ memcpy(&(proc_info->mem_entries),
+ &(proc_mgr_handle->mem_entries),
+ sizeof(proc_mgr_handle->mem_entries));
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return 0;
+error_exit:
+ return -EFAULT;
+}
+EXPORT_SYMBOL(proc_mgr_get_proc_info);
+
+/*============================================
+ * Function to get virtual to physical address translations
+ *
+ * This function retrieves the physical entries for the given device address.
+ *
+ */
+int proc_mgr_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+
+	/* Retrieve the physical entries for the given device address. */
+ retval = processor_virt_to_phys(proc_mgr_handle->proc_handle, da,
+ mapped_entries, num_of_entries);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+	return retval;
+}
+EXPORT_SYMBOL(proc_mgr_virt_to_phys);
+
diff --git a/drivers/dsp/syslink/procmgr/procmgr_drv.c b/drivers/dsp/syslink/procmgr/procmgr_drv.c
new file mode 100755
index 000000000000..59de4eed1058
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/procmgr_drv.c
@@ -0,0 +1,758 @@
+/*
+ * procmgr_drv.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#include <generated/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+
+/* Module headers */
+#include <procmgr.h>
+#include "procmgr_drvdefs.h"
+
+#define PROCMGR_NAME "syslink-procmgr"
+
+static char *driver_name = PROCMGR_NAME;
+
+static s32 driver_major;
+
+static s32 driver_minor;
+
+struct procmgr_dev {
+ struct cdev cdev;
+};
+
+struct platform_device *omap_proc_dev;
+static struct platform_device *procmgr_pdev;
+static struct procmgr_dev *procmgr_device;
+
+static struct class *proc_mgr_class;
+
+
+/** ====================================
+ * Forward declarations of internal functions
+ * ====================================
+ */
+/* Linux driver function to open the driver object. */
+static int proc_mgr_drv_open(struct inode *inode, struct file *filp);
+
+/* Linux driver function to close the driver object. */
+static int proc_mgr_drv_release(struct inode *inode, struct file *filp);
+
+/* Linux driver function to invoke the APIs through ioctl. */
+static int proc_mgr_drv_ioctl(struct inode *inode,
+ struct file *filp,
+ unsigned int cmd,
+ unsigned long args);
+
+/* Linux driver function to map memory regions to user space. */
+static int proc_mgr_drv_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* Module initialization function for Linux driver. */
+static int __init proc_mgr_drv_initialize_module(void);
+
+/* Module finalization function for Linux driver. */
+static void __exit proc_mgr_drv_finalize_module(void);
+
+/* Platform driver probe function */
+static int __devinit proc_mgr_probe(struct platform_device *pdev);
+
+/* Platform driver remove function */
+static int __devexit proc_mgr_remove(struct platform_device *pdev);
+
+/*
+ * File operations for the ProcMgr driver; the APIs are invoked through
+ * ioctl.
+ */
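+/*
+ * Note: the handlers are hooked to the legacy .ioctl file operation, whose
+ * handler receives the inode and file pointers directly (rather than the
+ * unlocked_ioctl variant).
+ */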
+static const struct file_operations procmgr_fops = {
+ .open = proc_mgr_drv_open,
+ .ioctl = proc_mgr_drv_ioctl,
+ .release = proc_mgr_drv_release,
+ .mmap = proc_mgr_drv_mmap,
+} ;
+
+static struct platform_driver procmgr_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = PROCMGR_NAME,
+ },
+ .probe = proc_mgr_probe,
+ .remove = __devexit_p(proc_mgr_remove),
+ .shutdown = NULL,
+ .suspend = NULL,
+ .resume = NULL,
+};
+
+/*
+ * Linux specific function to open the driver.
+ */
+static int proc_mgr_drv_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/*
+ * Linux driver function to close the driver object.
+ */
+static int proc_mgr_drv_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/*
+ * Linux driver function to invoke the APIs through ioctl.
+ */
+static int proc_mgr_drv_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int retval = 0;
+ struct proc_mgr_cmd_args *cmd_args = (struct proc_mgr_cmd_args *)args;
+ struct proc_mgr_cmd_args command_args;
+
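+	/* Most commands follow the same pattern: copy the argument block in
+	 * from user space, invoke the corresponding ProcMgr API, copy any
+	 * results back, and finally report the API status through the common
+	 * args at func_exit.
+	 */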
+ switch (cmd) {
+ case CMD_PROCMGR_GETCONFIG:
+ {
+ struct proc_mgr_cmd_args_get_config *src_args =
+ (struct proc_mgr_cmd_args_get_config *)args;
+ struct proc_mgr_config cfg;
+
+ /* copy_from_user is not needed for proc_mgr_get_config,
+ * since the user's config is not used.
+ */
+ proc_mgr_get_config(&cfg);
+
+ retval = copy_to_user((void *)(src_args->cfg),
+ (const void *)&cfg,
+ sizeof(struct proc_mgr_config));
+
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_SETUP:
+ {
+ struct proc_mgr_cmd_args_setup *src_args =
+ (struct proc_mgr_cmd_args_setup *)args;
+ struct proc_mgr_config cfg;
+
+ retval = copy_from_user((void *)&cfg,
+ (const void *)(src_args->cfg),
+ sizeof(struct proc_mgr_config));
+
+ /* This check is needed at run-time also since it
+ * depends on run environment.
+ * It must not be optimized out.
+ */
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+
+ retval = proc_mgr_setup(&cfg);
+ }
+ break;
+
+ case CMD_PROCMGR_DESTROY:
+ {
+ retval = proc_mgr_destroy();
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_PARAMS_INIT:
+ {
+ struct proc_mgr_cmd_args_params_init src_args;
+ struct proc_mgr_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_params_init));
+
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+
+ proc_mgr_params_init(src_args.handle, &params);
+
+ /* Copy only the params to user-side */
+ retval = copy_to_user((void *)(src_args.params),
+ (const void *)&params,
+ sizeof(struct proc_mgr_params));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_CREATE:
+ {
+ struct proc_mgr_cmd_args_create src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_create));
+
+ src_args.handle = proc_mgr_create(src_args.proc_id,
+ &(src_args.params));
+ if (src_args.handle == NULL) {
+ retval = -EFAULT;
+ goto func_exit;
+ }
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_create));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_DELETE:
+ {
+ struct proc_mgr_cmd_args_delete src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_delete));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+
+ retval = proc_mgr_delete(&(src_args.handle));
+ }
+ break;
+
+ case CMD_PROCMGR_OPEN:
+ {
+ struct proc_mgr_cmd_args_open src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_open));
+
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_open(&(src_args.handle),
+ src_args.proc_id);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = proc_mgr_get_proc_info(src_args.handle,
+ &(src_args.proc_info));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args), (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_open));
+ WARN_ON(retval);
+ }
+ break;
+
+ case CMD_PROCMGR_CLOSE:
+ {
+ struct proc_mgr_cmd_args_close src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_close));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_close(&(src_args.handle));
+ }
+ break;
+
+ case CMD_PROCMGR_GETATTACHPARAMS:
+ {
+ struct proc_mgr_cmd_args_get_attach_params src_args;
+ struct proc_mgr_attach_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_get_attach_params));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ proc_mgr_get_attach_params(src_args.handle, &params);
+ retval = copy_to_user((void *)(src_args.params),
+ (const void *)&params,
+ sizeof(struct proc_mgr_attach_params));
+ WARN_ON(retval);
+ }
+ break;
+
+ case CMD_PROCMGR_ATTACH:
+ {
+ struct proc_mgr_cmd_args_attach src_args;
+ struct proc_mgr_attach_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_attach));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ /* Copy params from user-side. */
+ retval = copy_from_user((void *)&params,
+ (const void *)(src_args.params),
+ sizeof(struct proc_mgr_attach_params));
+ retval = proc_mgr_attach(src_args.handle, &params);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ /* Get memory information. */
+ retval = proc_mgr_get_proc_info(src_args.handle,
+ &(src_args.proc_info));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_attach));
+ }
+ break;
+
+ case CMD_PROCMGR_DETACH:
+ {
+ struct proc_mgr_cmd_args_detach src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_detach));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_detach(src_args.handle);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ }
+ break;
+
+ case CMD_PROCMGR_GETSTARTPARAMS:
+ {
+ struct proc_mgr_cmd_args_get_start_params src_args;
+ struct proc_mgr_start_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_get_start_params));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ proc_mgr_get_start_params(src_args.handle, &params);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(src_args.params),
+ (const void *)&params,
+ sizeof(struct proc_mgr_start_params));
+ WARN_ON(retval);
+ }
+ break;
+
+ case CMD_PROCMGR_START:
+ {
+ struct proc_mgr_cmd_args_start src_args;
+ struct proc_mgr_start_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_start));
+ /* Copy params from user-side. */
+ retval = copy_from_user((void *)&params,
+ (const void *)(src_args.params),
+ sizeof(struct proc_mgr_start_params));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_start(src_args.handle,
+ src_args.entry_point, &params);
+
+ WARN_ON(retval);
+ }
+ break;
+
+ case CMD_PROCMGR_STOP:
+ {
+ struct proc_mgr_cmd_args_stop src_args;
+
+ struct proc_mgr_stop_params params;
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_stop));
+ /* Copy params from user-side. */
+ retval = copy_from_user((void *)&params,
+ (const void *)(src_args.params),
+ sizeof(struct proc_mgr_stop_params));
+
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_stop(src_args.handle, &params);
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_GETSTATE:
+ {
+ struct proc_mgr_cmd_args_get_state src_args;
+ enum proc_mgr_state procmgrstate;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_get_state));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ procmgrstate = proc_mgr_get_state(src_args.handle);
+ src_args.proc_mgr_state = procmgrstate;
+ retval = copy_to_user((void *)(args), (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_get_state));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_READ:
+ {
+ struct proc_mgr_cmd_args_read src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_read));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_read(src_args.handle,
+ src_args.proc_addr, &(src_args.num_bytes),
+ src_args.buffer);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_read));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_WRITE:
+ {
+ struct proc_mgr_cmd_args_write src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_write));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_write(src_args.handle,
+ src_args.proc_addr, &(src_args.num_bytes),
+ src_args.buffer);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_write));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_CONTROL:
+ {
+ struct proc_mgr_cmd_args_control src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_control));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_control(src_args.handle,
+ src_args.cmd, src_args.arg);
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_TRANSLATEADDR:
+ {
+ struct proc_mgr_cmd_args_translate_addr src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_translate_addr));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_translate_addr(src_args.handle,
+ &(src_args.dst_addr), src_args.dst_addr_type,
+ src_args.src_addr, src_args.src_addr_type);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args, sizeof
+ (struct proc_mgr_cmd_args_translate_addr));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_MAP:
+ {
+ struct proc_mgr_cmd_args_map src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_map));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_map(src_args.handle,
+ src_args.proc_addr, src_args.size,
+ &(src_args.mapped_addr),
+ &(src_args.mapped_size),
+ src_args.map_attribs);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_map));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_UNMAP:
+ {
+ struct proc_mgr_cmd_args_unmap src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_unmap));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_unmap(src_args.handle,
+ (src_args.mapped_addr));
+ WARN_ON(retval < 0);
+	}
+	break;
+
+ case CMD_PROCMGR_REGISTERNOTIFY:
+ {
+ struct proc_mgr_cmd_args_register_notify src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_register_notify));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_register_notify(src_args.handle,
+ src_args.callback_fxn,
+ src_args.args, src_args.state);
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_GETPROCINFO:
+ {
+ struct proc_mgr_cmd_args_get_proc_info src_args;
+ struct proc_mgr_proc_info proc_info;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_get_proc_info));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_get_proc_info
+ (src_args.handle, &proc_info);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(src_args.proc_info),
+ (const void *) &proc_info,
+ sizeof(struct proc_mgr_proc_info));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_GETVIRTTOPHYS:
+ {
+ struct proc_mgr_cmd_args_get_virt_to_phys src_args;
+
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_get_virt_to_phys));
+ retval = proc_mgr_virt_to_phys(src_args.handle,
+ src_args.da, (src_args.mem_entries),
+ src_args.num_of_entries);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args), (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_get_virt_to_phys));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+	default:
+		printk(KERN_ERR "PROC_MGR_DRV: unsupported ioctl command\n");
+		retval = -ENOTTY;
+		break;
+ }
+func_exit:
+ /* Set the retval and copy the common args to user-side. */
+ command_args.api_status = retval;
+ retval = copy_to_user((void *)cmd_args,
+ (const void *)&command_args, sizeof(struct proc_mgr_cmd_args));
+
+ WARN_ON(retval < 0);
+ return retval;
+}
+
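+/*
+ * Example (illustrative sketch; assumes udev exposes the class device
+ * created in proc_mgr_probe as /dev/syslink-procmgr): user space drives
+ * the module through plain ioctl calls, e.g.:
+ *
+ *	struct proc_mgr_cmd_args_setup args = { .cfg = &cfg };
+ *
+ *	fd = open("/dev/syslink-procmgr", O_RDWR);
+ *	status = ioctl(fd, CMD_PROCMGR_SETUP, &args);
+ */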
+
+/*
+ * Driver function to map memory regions to user space.
+ */
+static int proc_mgr_drv_mmap(struct file *filp, struct vm_area_struct *vma)
+{
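+	/* vm_pgoff carries the physical page frame number supplied by the
+	 * caller; the pages are mapped with non-cacheable attributes suitable
+	 * for coherent DMA.
+	 */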
+ vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
+ vma->vm_flags |= VM_RESERVED;
+
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int __devinit proc_mgr_probe(struct platform_device *pdev)
+{
+ dev_t dev = 0 ;
+ int retval = -ENOMEM;
+
+	/* Trace entry into the probe function. */
+ dev_dbg(&omap_proc_dev->dev, "Entering %s function\n\n", __func__);
+
+ if (driver_major) {
+ dev = MKDEV(driver_major, driver_minor);
+ retval = register_chrdev_region(dev, 1, driver_name);
+ } else {
+ retval = alloc_chrdev_region(&dev, driver_minor, 1,
+ driver_name);
+ driver_major = MAJOR(dev);
+ }
+
+ procmgr_device = kmalloc(sizeof(struct procmgr_dev), GFP_KERNEL);
+ if (!procmgr_device) {
+ retval = -ENOMEM;
+ unregister_chrdev_region(dev, 1);
+ goto exit;
+ }
+ memset(procmgr_device, 0, sizeof(struct procmgr_dev));
+ cdev_init(&procmgr_device->cdev, &procmgr_fops);
+ procmgr_device->cdev.owner = THIS_MODULE;
+ procmgr_device->cdev.ops = &procmgr_fops;
+
+ retval = cdev_add(&procmgr_device->cdev, dev, 1);
+
+ if (retval) {
+		printk(KERN_ERR "Failed to add the syslink procmgr device\n");
+ goto exit;
+ }
+
+ /* udev support */
+ proc_mgr_class = class_create(THIS_MODULE, "syslink-procmgr");
+
+	if (IS_ERR(proc_mgr_class)) {
+		printk(KERN_ERR "Error creating proc_mgr class\n");
+		retval = PTR_ERR(proc_mgr_class);
+		goto exit;
+	}
+ device_create(proc_mgr_class, NULL, MKDEV(driver_major, driver_minor),
+ NULL, PROCMGR_NAME);
+
+exit:
+ dev_dbg(&omap_proc_dev->dev, "Leaving %s function\n\n", __func__);
+ return retval;
+}
+
+
+static int __devexit proc_mgr_remove(struct platform_device *pdev)
+{
+ dev_t devno = 0;
+
+ dev_dbg(&omap_proc_dev->dev, "Entering %s function\n", __func__);
+ devno = MKDEV(driver_major, driver_minor);
+ if (procmgr_device) {
+ cdev_del(&procmgr_device->cdev);
+ kfree(procmgr_device);
+ }
+ unregister_chrdev_region(devno, 1);
+ if (proc_mgr_class) {
+ /* remove the device from sysfs */
+ device_destroy(proc_mgr_class, MKDEV(driver_major,
+ driver_minor));
+ class_destroy(proc_mgr_class);
+ }
+	dev_dbg(&omap_proc_dev->dev, "Leaving %s function\n", __func__);
+ return 0;
+}
+
+/*
+ * Module initialization function for Linux driver.
+ */
+static int __init proc_mgr_drv_initialize_module(void)
+{
+ int retval = -ENOMEM;
+
+ procmgr_pdev = platform_device_alloc(PROCMGR_NAME, -1);
+ if (!procmgr_pdev) {
+ printk(KERN_ERR "%s:device allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+ retval = platform_device_add(procmgr_pdev);
+ if (retval)
+ goto err_out;
+
+ /*Saving the context for future use*/
+ omap_proc_dev = procmgr_pdev;
+
+ retval = platform_driver_register(&procmgr_driver);
+ if (!retval)
+ return retval;
+err_out:
+ platform_device_put(procmgr_pdev);
+ return retval;
+}
+
+/*
+ * Module finalization function for Linux driver.
+ */
+static void __exit proc_mgr_drv_finalize_module(void)
+{
+
+ dev_dbg(&omap_proc_dev->dev, "Entering %s function\n", __func__);
+ platform_device_unregister(procmgr_pdev);
+ platform_driver_unregister(&procmgr_driver);
+ dev_dbg(&omap_proc_dev->dev, "Leaving %s function\n", __func__);
+}
+
+/*
+ * Macro calls that indicate the initialization and finalization functions
+ * to the kernel.
+ */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mugdha Kamoolkar");
+module_init(proc_mgr_drv_initialize_module);
+module_exit(proc_mgr_drv_finalize_module);
diff --git a/drivers/dsp/syslink/procmgr/procmgr_drvdefs.h b/drivers/dsp/syslink/procmgr/procmgr_drvdefs.h
new file mode 100755
index 000000000000..2be14bf7a20e
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/procmgr_drvdefs.h
@@ -0,0 +1,541 @@
+/*
+ * procmgr_drvdefs.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#ifndef SYSLINK_PROCMGR_DRVDEFS_H
+#define SYSLINK_PROCMGR_DRVDEFS_H
+
+#include <linux/types.h>
+
+/* Module headers */
+#include <procmgr.h>
+
+
+/* =================================
+ * Macros and types
+ * =================================
+ */
+/*
+ * Base structure for ProcMgr command args. This needs to be the first
+ * field in all command args structures.
+ */
+struct proc_mgr_cmd_args {
+ int api_status;
+ /*Status of the API being called. */
+};
+
+/* --------------------------------------
+ * IOCTL command IDs for ProcMgr
+ * ---------------------------------------
+ */
+/*
+ * Base command ID for ProcMgr
+ */
+#define PROCMGR_BASE_CMD 0x100
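+
+/*
+ * Note: the command IDs below are plain sequential values (PROCMGR_BASE_CMD
+ * plus an offset) rather than _IO()-encoded numbers; they are matched
+ * directly in the driver's ioctl switch statement.
+ */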
+
+/*
+ * Command for ProcMgr_getConfig
+ */
+#define CMD_PROCMGR_GETCONFIG (PROCMGR_BASE_CMD + 1)
+
+/*
+ * Command for ProcMgr_setup
+ */
+#define CMD_PROCMGR_SETUP (PROCMGR_BASE_CMD + 2)
+
+/*
+ * Command for ProcMgr_setup
+ */
+#define CMD_PROCMGR_DESTROY (PROCMGR_BASE_CMD + 3)
+
+/*
+ * Command for ProcMgr_destroy
+ */
+#define CMD_PROCMGR_PARAMS_INIT (PROCMGR_BASE_CMD + 4)
+
+/*
+ * Command for ProcMgr_create
+ */
+#define CMD_PROCMGR_CREATE (PROCMGR_BASE_CMD + 5)
+
+/*
+ * Command for ProcMgr_delete
+ */
+#define CMD_PROCMGR_DELETE (PROCMGR_BASE_CMD + 6)
+
+/*
+ * Command for ProcMgr_open
+ */
+#define CMD_PROCMGR_OPEN (PROCMGR_BASE_CMD + 7)
+
+/*
+ * Command for ProcMgr_close
+ */
+#define CMD_PROCMGR_CLOSE (PROCMGR_BASE_CMD + 8)
+
+/*
+ * Command for ProcMgr_getAttachParams
+ */
+#define CMD_PROCMGR_GETATTACHPARAMS (PROCMGR_BASE_CMD + 9)
+
+/*
+ * Command for ProcMgr_attach
+ */
+#define CMD_PROCMGR_ATTACH (PROCMGR_BASE_CMD + 10)
+
+/*
+ * Command for ProcMgr_detach
+ */
+#define CMD_PROCMGR_DETACH (PROCMGR_BASE_CMD + 11)
+
+/*
+ * Command for ProcMgr_load
+ */
+#define CMD_PROCMGR_LOAD (PROCMGR_BASE_CMD + 12)
+
+/*
+ * Command for ProcMgr_unload
+ */
+#define CMD_PROCMGR_UNLOAD (PROCMGR_BASE_CMD + 13)
+
+/*
+ * Command for ProcMgr_getStartParams
+ */
+#define CMD_PROCMGR_GETSTARTPARAMS (PROCMGR_BASE_CMD + 14)
+
+/*
+ * Command for ProcMgr_start
+ */
+#define CMD_PROCMGR_START (PROCMGR_BASE_CMD + 15)
+
+/*
+ * Command for ProcMgr_stop
+ */
+#define CMD_PROCMGR_STOP (PROCMGR_BASE_CMD + 16)
+
+/*
+ * Command for ProcMgr_getState
+ */
+#define CMD_PROCMGR_GETSTATE (PROCMGR_BASE_CMD + 17)
+
+/*
+ * Command for ProcMgr_read
+ */
+#define CMD_PROCMGR_READ (PROCMGR_BASE_CMD + 18)
+
+/*
+ * Command for ProcMgr_write
+ */
+#define CMD_PROCMGR_WRITE (PROCMGR_BASE_CMD + 19)
+
+/*
+ * Command for ProcMgr_control
+ */
+#define CMD_PROCMGR_CONTROL (PROCMGR_BASE_CMD + 20)
+
+/*
+ * Command for ProcMgr_translateAddr
+ */
+#define CMD_PROCMGR_TRANSLATEADDR (PROCMGR_BASE_CMD + 22)
+
+/*
+ * Command for ProcMgr_getSymbolAddress
+ */
+#define CMD_PROCMGR_GETSYMBOLADDRESS (PROCMGR_BASE_CMD + 23)
+
+/*
+ * Command for ProcMgr_map
+ */
+#define CMD_PROCMGR_MAP (PROCMGR_BASE_CMD + 24)
+
+/*
+ * Command for ProcMgr_registerNotify
+ */
+#define CMD_PROCMGR_REGISTERNOTIFY (PROCMGR_BASE_CMD + 25)
+
+/*
+ * Command for ProcMgr_getProcInfo
+ */
+#define CMD_PROCMGR_GETPROCINFO (PROCMGR_BASE_CMD + 26)
+
+/*
+ * Command for ProcMgr_unmap
+ */
+#define CMD_PROCMGR_UNMAP (PROCMGR_BASE_CMD + 27)
+
+/*
+ * Command for ProcMgr_getVirtToPhysPages
+ */
+#define CMD_PROCMGR_GETVIRTTOPHYS (PROCMGR_BASE_CMD + 28)
+
+
+
+
+/* ----------------------------------------------------------------------------
+ * Command arguments for ProcMgr
+ * ----------------------------------------------------------------------------
+ */
+/*
+ * Command arguments for ProcMgr_getConfig
+ */
+struct proc_mgr_cmd_args_get_config {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ struct proc_mgr_config *cfg;
+ /*Pointer to the ProcMgr module configuration structure in which the
+ default config is to be returned. */
+};
+
+/*
+ * Command arguments for ProcMgr_setup
+ */
+struct proc_mgr_cmd_args_setup {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ struct proc_mgr_config *cfg;
+ /*Optional ProcMgr module configuration. If provided as NULL, default
+ configuration is used. */
+};
+
+/*
+ * Command arguments for ProcMgr_destroy
+ */
+struct proc_mgr_cmd_args_destroy {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+};
+
+/*
+ * Command arguments for ProcMgr_Params_init
+ */
+struct proc_mgr_cmd_args_params_init {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object. */
+ struct proc_mgr_params *params;
+ /*Pointer to the ProcMgr instance params structure in which the default
+ params is to be returned. */
+};
+
+/*
+ * Command arguments for ProcMgr_create
+ */
+struct proc_mgr_cmd_args_create {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ u16 proc_id;
+ /*Processor ID represented by this ProcMgr instance */
+ struct proc_mgr_params params;
+ /*ProcMgr instance configuration parameters. */
+ void *handle;
+ /*Handle to the created ProcMgr object */
+};
+
+/*
+ * Command arguments for ProcMgr_delete
+ */
+struct proc_mgr_cmd_args_delete{
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Pointer to Handle to the ProcMgr object */
+};
+
+/*
+ * Command arguments for ProcMgr_open
+ */
+struct proc_mgr_cmd_args_open {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ u16 proc_id;
+ /*Processor ID represented by this ProcMgr instance */
+ void *handle;
+ /*Handle to the opened ProcMgr object. */
+ struct proc_mgr_proc_info proc_info;
+ /*Processor information. */
+};
+
+/*
+ * Command arguments for ProcMgr_close
+ */
+struct proc_mgr_cmd_args_close{
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ struct proc_mgr_proc_info proc_info;
+ /*Processor information. */
+};
+
+/*
+ * Command arguments for ProcMgr_getAttachParams
+ */
+struct proc_mgr_cmd_args_get_attach_params{
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object. */
+ struct proc_mgr_attach_params *params;
+ /*Pointer to the ProcMgr attach params structure in which the default
+ params is to be returned. */
+};
+
+/*
+ * Command arguments for ProcMgr_attach
+ */
+struct proc_mgr_cmd_args_attach {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object. */
+ struct proc_mgr_attach_params *params;
+ /*Optional ProcMgr attach parameters. */
+ struct proc_mgr_proc_info proc_info;
+ /*Processor information. */
+};
+
+/*
+ * Command arguments for ProcMgr_detach
+ */
+struct proc_mgr_cmd_args_detach {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ struct proc_mgr_proc_info proc_info;
+ /*Processor information. */
+};
+
+
+/*
+ * Command arguments for ProcMgr_getStartParams
+ */
+struct proc_mgr_cmd_args_get_start_params {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 entry_point;
+ /*Entry point for the image */
+ struct proc_mgr_start_params *params;
+ /*Pointer to the ProcMgr start params structure in which the default
+ params are to be returned. */
+};
+
+/*
+ * Command arguments for ProcMgr_start
+ */
+struct proc_mgr_cmd_args_start {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 entry_point;
+ /*Entry point for the image */
+ struct proc_mgr_start_params *params;
+ /*Optional ProcMgr start parameters. */
+};
+
+/*
+ * Command arguments for ProcMgr_stop
+ */
+struct proc_mgr_cmd_args_stop {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ struct proc_mgr_stop_params *params;
+ /*Optional ProcMgr stop parameters. */
+};
+
+/*
+ * Command arguments for ProcMgr_getState
+ */
+struct proc_mgr_cmd_args_get_state {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /* Handle to the ProcMgr object */
+ enum proc_mgr_state proc_mgr_state;
+ /*Current state of the ProcMgr object. */
+};
+
+/*
+ * Command arguments for ProcMgr_read
+ */
+struct proc_mgr_cmd_args_read {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 proc_addr;
+ /*Address in the slave processor's address space of the memory region to
+ read from. */
+ u32 num_bytes;
+ /*IN/OUT parameter. As an IN-parameter, it takes in the number of bytes
+ to be read. When the function returns, this parameter contains the
+ number of bytes actually read. */
+ void *buffer;
+ /*User-provided buffer in which the slave processor's memory contents
+ are to be copied. */
+};
+
+/*
+ * Command arguments for ProcMgr_write
+ */
+struct proc_mgr_cmd_args_write {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 proc_addr;
+ /*Address in the slave processor's address space of the memory region to
+ write into. */
+ u32 num_bytes;
+ /*IN/OUT parameter. As an IN-parameter, it takes in the number of bytes
+ to be written. When the function returns, this parameter contains the
+ number of bytes actually written. */
+ void *buffer;
+ /*User-provided buffer from which the data is to be written into the
+ slave processor's memory. */
+};
+
+/*
+ * Command arguments for ProcMgr_control
+ */
+struct proc_mgr_cmd_args_control {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ int cmd;
+ /*Device specific processor command */
+ void *arg;
+ /*Arguments specific to the type of command. */
+};
+
+/*
+ * Command arguments for ProcMgr_translateAddr
+ */
+struct proc_mgr_cmd_args_translate_addr {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ void *dst_addr;
+ /*Return parameter: Pointer to receive the translated address. */
+ enum proc_mgr_addr_type dst_addr_type;
+ /*Destination address type requested */
+ void *src_addr;
+ /*Source address in the source address space */
+ enum proc_mgr_addr_type src_addr_type;
+ /*Source address type */
+};
+
+/*
+ * Command arguments for ProcMgr_getSymbolAddress
+ */
+struct proc_mgr_cmd_args_get_symbol_address {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 file_id;
+ /*ID of the file received from the load function */
+ char *symbol_name;
+ /*Name of the symbol */
+ u32 sym_value;
+ /*Return parameter: Symbol address */
+};
+
+/*
+ * Command arguments for ProcMgr_map
+ */
+struct proc_mgr_cmd_args_map {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 proc_addr;
+ /*Slave address to be mapped */
+ u32 size;
+ /*Size (in bytes) of region to be mapped */
+ u32 mapped_addr;
+ /*Return parameter: Mapped address in host address space */
+ u32 mapped_size;
+ /*Return parameter: Mapped size */
+ u32 map_attribs;
+ /*Type of mapping. */
+};
+
+/*
+ * Command arguments for ProcMgr_unmap
+ */
+struct proc_mgr_cmd_args_unmap {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 mapped_addr;
+ /* Mapped address in host address space */
+};
+
+/*
+ * Command arguments for ProcMgr_registerNotify
+ */
+struct proc_mgr_cmd_args_register_notify {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ int (*callback_fxn)(u16 proc_id, void *handle,
+ enum proc_mgr_state from_state, enum proc_mgr_state to_state);
+ /*Handling function to be registered. */
+ void *args;
+ /*Optional arguments associated with the handler fxn. */
+ enum proc_mgr_state state[];
+ /*Array of target states for which registration is required. */
+};
+
+/*
+ * Command arguments for ProcMgr_getProcInfo
+ */
+struct proc_mgr_cmd_args_get_proc_info {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ struct proc_mgr_proc_info *proc_info;
+ /*Pointer to the ProcInfo object to be populated. */
+};
+
+/*
+ * Command arguments for ProcMgr_getVirtToPhysPages
+ */
+struct proc_mgr_cmd_args_get_virt_to_phys {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 da;
+ /*Device address to be translated */
+ u32 *mem_entries;
+ /* mem entries buffer */
+ u32 num_of_entries;
+ /* number of entries */
+};
+
+#endif
+
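Each CMD_PROCMGR_* code above is paired with the argument structure of the
same name: user space fills the matching structure and passes it to ioctl()
on the ProcMgr character device. A minimal user-space sketch follows; the
device node name, the header install name and the handle management are
assumptions for illustration and are not taken from this patch.

/* Hypothetical caller: query processor information via GETPROCINFO. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <procmgr_ioctl.h>	/* assumed install name for the header above */

int procmgr_get_info(void *handle, struct proc_mgr_proc_info *info)
{
	struct proc_mgr_cmd_args_get_proc_info args;
	int fd, ret;

	fd = open("/dev/syslink-procmgr", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	args.handle = handle;		/* from an earlier create/open call */
	args.proc_info = info;		/* populated by the driver on success */

	ret = ioctl(fd, CMD_PROCMGR_GETPROCINFO, &args);
	close(fd);
	return ret;
}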
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index a28541b2b1a2..a21096455b64 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -5,6 +5,7 @@
menuconfig MEDIA_SUPPORT
tristate "Multimedia support"
depends on HAS_IOMEM
+ default y
help
If you want to use Video for Linux, DVB for Linux, or DAB adapters,
enable this option and other options below.
@@ -19,6 +20,7 @@ comment "Multimedia core support"
config VIDEO_DEV
tristate "Video For Linux"
+ default y
---help---
V4L core support for video capture and overlay devices, webcams and
AM/FM radio cards.
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 2f83be766d9f..d9be70f6d864 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -779,6 +779,8 @@ source "drivers/media/video/cx18/Kconfig"
source "drivers/media/video/saa7164/Kconfig"
+source "drivers/media/video/omap/Kconfig"
+
config VIDEO_M32R_AR
tristate "AR devices"
depends on M32R && VIDEO_V4L1
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 2af68ee84122..8fc39fd39214 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -121,6 +121,8 @@ obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
+obj-$(CONFIG_VIDEO_OMAP3_OUT) += omap/
+
obj-$(CONFIG_USB_DABUSB) += dabusb.o
obj-$(CONFIG_USB_OV511) += ov511.o
obj-$(CONFIG_USB_SE401) += se401.o
@@ -167,7 +169,8 @@ obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
-obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+obj-$(CONFIG_DMM_OMAP) += dmm/
+obj-$(CONFIG_TILER_OMAP) += tiler/
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/dmm/Kconfig b/drivers/media/video/dmm/Kconfig
new file mode 100644
index 000000000000..4af47ea0aef6
--- /dev/null
+++ b/drivers/media/video/dmm/Kconfig
@@ -0,0 +1,6 @@
+config DMM_OMAP
+ tristate "OMAP DMM support"
+ default y
+ help
+ DMM driver for OMAP based boards.
+
diff --git a/drivers/media/video/dmm/Makefile b/drivers/media/video/dmm/Makefile
new file mode 100644
index 000000000000..54a00d637bab
--- /dev/null
+++ b/drivers/media/video/dmm/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_DMM_OMAP) += dmm_omap.o
+dmm_omap-objs = dmm.o dmm_mem.o
+
diff --git a/drivers/media/video/dmm/dmm.c b/drivers/media/video/dmm/dmm.c
new file mode 100644
index 000000000000..3475421b029b
--- /dev/null
+++ b/drivers/media/video/dmm/dmm.c
@@ -0,0 +1,274 @@
+/*
+ * dmm.c
+ *
+ * DMM driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h> /* struct cdev */
+#include <linux/kdev_t.h> /* MKDEV() */
+#include <linux/fs.h> /* register_chrdev_region() */
+#include <linux/device.h> /* struct class */
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/err.h> /* IS_ERR() */
+#include <linux/io.h> /* ioremap() */
+#include <linux/errno.h>
+
+#include "dmm.h"
+#include "dmm_mem.h"
+
+#undef __DEBUG__
+#define BITS_32(in_NbBits) ((((u32)1 << in_NbBits) - 1) | ((u32)1 << in_NbBits))
+#define BITFIELD_32(in_UpBit, in_LowBit)\
+ (BITS_32(in_UpBit) & ~((BITS_32(in_LowBit)) >> 1))
+#define BF BITFIELD_32
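+/*
+ * BITS_32(n) is a mask with bits n..0 set, so BITFIELD_32(u, l) covers bits
+ * u..l inclusive; e.g. BITFIELD_32(7, 4) == 0x000000F0, and BF(31, 4) below
+ * selects bits 31..4 of a register word.
+ */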
+
+#ifdef __DEBUG__
+#define DEBUG(x, y) printk(KERN_NOTICE "%s()::%d:%s=(0x%08x)\n", \
+ __func__, __LINE__, x, (s32)y);
+#else
+#define DEBUG(x, y)
+#endif
+
+static s32 dmm_major;
+static s32 dmm_minor;
+void __iomem *dmm_base;
+
+struct dmm_dev {
+ struct cdev cdev;
+};
+
+static struct dmm_dev *dmm_device;
+static struct class *dmmdev_class;
+
+static struct platform_driver dmm_driver_ldm = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "dmm",
+ },
+ .probe = NULL,
+ .shutdown = NULL,
+ .remove = NULL,
+};
+
+s32 dmm_pat_refill(struct pat *pd, enum pat_mode mode)
+{
+ void __iomem *r = NULL;
+ u32 v = -1, w = -1;
+
+ /* Only manual refill supported */
+ if (mode != MANUAL)
+ return -EFAULT;
+
+ /*
+ * Check that the DMM_PAT_STATUS register
+ * has not reported an error.
+ */
+ r = (void __iomem *)((u32)dmm_base | DMM_PAT_STATUS__0);
+ v = __raw_readl(r);
+ if ((v & 0xFC00) != 0) {
+ while (1)
+ printk(KERN_ERR "dmm_pat_refill() error.\n");
+ }
+
+ /* Set "next" register to NULL */
+ r = (void __iomem *)((u32)dmm_base | DMM_PAT_DESCR__0);
+ v = __raw_readl(r);
+ w = (v & (~(BF(31, 4)))) | ((((u32)NULL) << 4) & BF(31, 4));
+ __raw_writel(w, r);
+
+ /* Set area to be refilled */
+ r = (void __iomem *)((u32)dmm_base | DMM_PAT_AREA__0);
+ v = __raw_readl(r);
+ w = (v & (~(BF(30, 24)))) | ((((s8)pd->area.y1) << 24) & BF(30, 24));
+ __raw_writel(w, r);
+
+ v = __raw_readl(r);
+ w = (v & (~(BF(23, 16)))) | ((((s8)pd->area.x1) << 16) & BF(23, 16));
+ __raw_writel(w, r);
+
+ v = __raw_readl(r);
+ w = (v & (~(BF(14, 8)))) | ((((s8)pd->area.y0) << 8) & BF(14, 8));
+ __raw_writel(w, r);
+
+ v = __raw_readl(r);
+ w = (v & (~(BF(7, 0)))) | ((((s8)pd->area.x0) << 0) & BF(7, 0));
+ __raw_writel(w, r);
+ dsb();
+
+#ifdef __DEBUG__
+ printk(KERN_NOTICE "\nx0=(%d),y0=(%d),x1=(%d),y1=(%d)\n",
+ (char)pd->area.x0,
+ (char)pd->area.y0,
+ (char)pd->area.x1,
+ (char)pd->area.y1);
+#endif
+
+ /* First, clear the DMM_PAT_IRQSTATUS register */
+ r = (void __iomem *)((u32)dmm_base | (u32)DMM_PAT_IRQSTATUS);
+ __raw_writel(0xFFFFFFFF, r);
+ dsb();
+
+ r = (void __iomem *)((u32)dmm_base | (u32)DMM_PAT_IRQSTATUS_RAW);
+ v = 0xFFFFFFFF;
+
+ while (v != 0x0) {
+ v = __raw_readl(r);
+ DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
+ }
+
+ /* Fill data register */
+ r = (void __iomem *)((u32)dmm_base | DMM_PAT_DATA__0);
+ v = __raw_readl(r);
+
+ /* Apply a 4-bit left shift to counter the 4-bit right shift */
+ w = (v & (~(BF(31, 4)))) | ((((u32)(pd->data >> 4)) << 4) & BF(31, 4));
+ __raw_writel(w, r);
+ dsb();
+
+ /* Read back PAT_DATA__0 to see if write was successful */
+ v = 0x0;
+ while (v != pd->data) {
+ v = __raw_readl(r);
+ DEBUG("DMM_PAT_DATA__0", v);
+ }
+
+ r = (void __iomem *)((u32)dmm_base | (u32)DMM_PAT_CTRL__0);
+ v = __raw_readl(r);
+
+ w = (v & (~(BF(31, 28)))) | ((((u32)pd->ctrl.ini) << 28) & BF(31, 28));
+ __raw_writel(w, r);
+
+ v = __raw_readl(r);
+ w = (v & (~(BF(16, 16)))) | ((((u32)pd->ctrl.sync) << 16) & BF(16, 16));
+ __raw_writel(w, r);
+
+ v = __raw_readl(r);
+ w = (v & (~(BF(9, 8)))) | ((((u32)pd->ctrl.lut_id) << 8) & BF(9, 8));
+ __raw_writel(w, r);
+
+ v = __raw_readl(r);
+ w = (v & (~(BF(6, 4)))) | ((((u32)pd->ctrl.dir) << 4) & BF(6, 4));
+ __raw_writel(w, r);
+
+ v = __raw_readl(r);
+ w = (v & (~(BF(0, 0)))) | ((((u32)pd->ctrl.start) << 0) & BF(0, 0));
+ __raw_writel(w, r);
+ dsb();
+
+ /*
+ * Now, check if PAT_IRQSTATUS_RAW has been
+ * set after the PAT has been refilled
+ */
+ r = (void __iomem *)((u32)dmm_base | (u32)DMM_PAT_IRQSTATUS_RAW);
+ v = 0x0;
+ while ((v & 0x3) != 0x3) {
+ v = __raw_readl(r);
+ DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dmm_pat_refill);
+
+static s32 dmm_open(struct inode *ip, struct file *filp)
+{
+ return 0;
+}
+
+static s32 dmm_release(struct inode *ip, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations dmm_fops = {
+ .open = dmm_open,
+ .release = dmm_release,
+};
+
+static s32 __init dmm_init(void)
+{
+ dev_t dev = 0;
+ s32 r = -1;
+ struct device *device = NULL;
+
+ if (dmm_major) {
+ dev = MKDEV(dmm_major, dmm_minor);
+ r = register_chrdev_region(dev, 1, "dmm");
+ } else {
+ r = alloc_chrdev_region(&dev, dmm_minor, 1, "dmm");
+ dmm_major = MAJOR(dev);
+ }
+
+ dmm_device = kmalloc(sizeof(struct dmm_dev), GFP_KERNEL);
+ if (!dmm_device) {
+ unregister_chrdev_region(dev, 1);
+ return -ENOMEM;
+ }
+ memset(dmm_device, 0x0, sizeof(struct dmm_dev));
+
+ cdev_init(&dmm_device->cdev, &dmm_fops);
+ dmm_device->cdev.owner = THIS_MODULE;
+ dmm_device->cdev.ops = &dmm_fops;
+
+ r = cdev_add(&dmm_device->cdev, dev, 1);
+ if (r)
+ printk(KERN_ERR "cdev_add():failed\n");
+
+ dmmdev_class = class_create(THIS_MODULE, "dmm");
+
+ if (IS_ERR(dmmdev_class)) {
+ printk(KERN_ERR "class_create():failed\n");
+ r = PTR_ERR(dmmdev_class);
+ goto EXIT;
+ }
+
+ device = device_create(dmmdev_class, NULL, dev, NULL, "dmm");
+ /* device_create() returns an ERR_PTR on failure, never NULL */
+ if (IS_ERR(device))
+ printk(KERN_ERR "device_create() fail\n");
+
+ r = platform_driver_register(&dmm_driver_ldm);
+
+ dmm_base = ioremap(DMM_BASE, DMM_SIZE);
+ if (!dmm_base)
+ return -ENOMEM;
+
+ __raw_writel(0x88888888, dmm_base + DMM_PAT_VIEW__0);
+ __raw_writel(0x88888888, dmm_base + DMM_PAT_VIEW__1);
+ __raw_writel(0x80808080, dmm_base + DMM_PAT_VIEW_MAP__0);
+ __raw_writel(0x80000000, dmm_base + DMM_PAT_VIEW_MAP_BASE);
+ __raw_writel(0x88888888, dmm_base + DMM_TILER_OR__0);
+ __raw_writel(0x88888888, dmm_base + DMM_TILER_OR__1);
+
+ if (dmm_init_mem())
+ return -ENOMEM;
+
+EXIT:
+ return r;
+}
+
+static void __exit dmm_exit(void)
+{
+ dmm_release_mem();
+ iounmap(dmm_base);
+ platform_driver_unregister(&dmm_driver_ldm);
+ cdev_del(&dmm_device->cdev);
+ kfree(dmm_device);
+ device_destroy(dmmdev_class, MKDEV(dmm_major, dmm_minor));
+ class_destroy(dmmdev_class);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("davidsin@ti.com");
+module_init(dmm_init);
+module_exit(dmm_exit);
diff --git a/drivers/media/video/dmm/dmm.h b/drivers/media/video/dmm/dmm.h
new file mode 100644
index 000000000000..a3adddbf9fd6
--- /dev/null
+++ b/drivers/media/video/dmm/dmm.h
@@ -0,0 +1,128 @@
+/*
+ * dmm.h
+ *
+ * DMM driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DMM_H
+#define DMM_H
+
+#define DMM_BASE 0x4E000000
+#define DMM_SIZE 0x800
+
+#define DMM_REVISION 0x000
+#define DMM_HWINFO 0x004
+#define DMM_LISA_HWINFO 0x008
+#define DMM_DMM_SYSCONFIG 0x010
+#define DMM_LISA_LOCK 0x01C
+#define DMM_LISA_MAP__0 0x040
+#define DMM_LISA_MAP__1 0x044
+#define DMM_TILER_HWINFO 0x208
+#define DMM_TILER_OR__0 0x220
+#define DMM_TILER_OR__1 0x224
+#define DMM_PAT_HWINFO 0x408
+#define DMM_PAT_GEOMETRY 0x40C
+#define DMM_PAT_CONFIG 0x410
+#define DMM_PAT_VIEW__0 0x420
+#define DMM_PAT_VIEW__1 0x424
+#define DMM_PAT_VIEW_MAP__0 0x440
+#define DMM_PAT_VIEW_MAP_BASE 0x460
+#define DMM_PAT_IRQ_EOI 0x478
+#define DMM_PAT_IRQSTATUS_RAW 0x480
+#define DMM_PAT_IRQSTATUS 0x490
+#define DMM_PAT_IRQENABLE_SET 0x4A0
+#define DMM_PAT_IRQENABLE_CLR 0x4B0
+#define DMM_PAT_STATUS__0 0x4C0
+#define DMM_PAT_STATUS__1 0x4C4
+#define DMM_PAT_STATUS__2 0x4C8
+#define DMM_PAT_STATUS__3 0x4CC
+#define DMM_PAT_DESCR__0 0x500
+#define DMM_PAT_AREA__0 0x504
+#define DMM_PAT_CTRL__0 0x508
+#define DMM_PAT_DATA__0 0x50C
+#define DMM_PEG_HWINFO 0x608
+#define DMM_PEG_PRIO 0x620
+#define DMM_PEG_PRIO_PAT 0x640
+
+/**
+ * PAT refill programming mode.
+ */
+enum pat_mode {
+ MANUAL,
+ AUTO
+};
+
+/**
+ * Area definition for DMM physical address translator.
+ */
+struct pat_area {
+ s32 x0:8;
+ s32 y0:8;
+ s32 x1:8;
+ s32 y1:8;
+};
+
+/**
+ * DMM physical address translator control.
+ */
+struct pat_ctrl {
+ s32 start:4;
+ s32 dir:4;
+ s32 lut_id:8;
+ s32 sync:12;
+ s32 ini:4;
+};
+
+/**
+ * PAT descriptor.
+ */
+struct pat {
+ struct pat *next;
+ struct pat_area area;
+ struct pat_ctrl ctrl;
+ u32 data;
+};
+
+/**
+ * Program the physical address translator.
+ * @param desc
+ * @param mode
+ * @return an error status.
+ */
+s32 dmm_pat_refill(struct pat *desc, enum pat_mode mode);
+
+/**
+ * Request a page from the DMM free page stack.
+ * @return a physical page address.
+ */
+u32 dmm_get_page(void);
+
+/**
+ * Return a used page to the DMM free page stack.
+ * @param page_addr a physical page address.
+ */
+void dmm_free_page(u32 page_addr);
+
+/**
+ * Request a set of pages from the DMM free page stack.
+ * @return a pointer to a list of physical page addresses.
+ */
+u32 *dmm_get_pages(s32 n);
+
+/**
+ * Return a set of used pages to the DMM free page stack.
+ * @param list a pointer to a list of physical page addresses.
+ */
+void dmm_free_pages(u32 *list);
+
+#endif
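Taken together with dmm.c above, the intended calling sequence is: take a
physical page from the DMM free-page stack, describe the target area and
control bits in a struct pat, and hand the descriptor to dmm_pat_refill().
A minimal in-kernel sketch follows; the area and control values are
illustrative assumptions, only the overall sequence comes from this header
and dmm.c.

#include <linux/errno.h>
#include <linux/string.h>
#include "dmm.h"

/* Map one page through the PAT (illustrative values, not from this patch). */
static int dmm_map_one_page_example(void)
{
	struct pat desc;
	u32 page = dmm_get_page();	/* physical page from the free stack */

	if (!page)
		return -ENOMEM;

	memset(&desc, 0, sizeof(desc));
	desc.area.x0 = 0;		/* refill the 1x1 area at (0, 0) */
	desc.area.y0 = 0;
	desc.area.x1 = 0;
	desc.area.y1 = 0;
	desc.ctrl.start = 1;		/* assumed: kick off the refill */
	desc.data = page;		/* 16-byte-aligned physical address */

	if (dmm_pat_refill(&desc, MANUAL)) {
		dmm_free_page(page);
		return -EFAULT;
	}
	return 0;
}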
diff --git a/drivers/media/video/dmm/dmm_mem.c b/drivers/media/video/dmm/dmm_mem.c
new file mode 100644
index 000000000000..f91c561b4a99
--- /dev/null
+++ b/drivers/media/video/dmm/dmm_mem.c
@@ -0,0 +1,307 @@
+/*
+ * dmm_mem.c
+ *
+ * DMM driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <asm/cacheflush.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+
+#include "dmm_mem.h"
+
+/**
+ * Number of pages to allocate when
+ * refilling the free page stack.
+ */
+#define MAX 16
+#define DMM_PAGE 0x1000
+
+/**
+ * Used to keep track of mem per
+ * dmm_get_pages call.
+ */
+struct fast {
+ struct list_head list;
+ u32 *mem;
+ u32 *pa;
+ u32 num;
+};
+
+/**
+ * Used to keep track of the page struct ptrs
+ * and physical addresses of each page.
+ */
+struct mem {
+ struct list_head list;
+ struct page *pg;
+ u32 pa;
+};
+
+static struct fast fast_list;
+static struct mem free_list;
+static struct mem used_list;
+static struct mutex mtx;
+
+static void dmm_free_fast_list(struct fast *fast)
+{
+ struct list_head *pos = NULL, *q = NULL;
+ struct fast *f = NULL;
+ s32 i = 0;
+
+ mutex_lock(&mtx);
+ list_for_each_safe(pos, q, &fast->list) {
+ f = list_entry(pos, struct fast, list);
+ for (i = 0; i < f->num; i++) {
+ list_add(&((struct mem *)f->mem[i])->list,
+ &free_list.list);
+ }
+ kfree(f->pa);
+ kfree(f->mem);
+ list_del(pos);
+ kfree(f);
+ }
+ mutex_unlock(&mtx);
+}
+
+static u32 fill_page_stack(struct mem *mem)
+{
+ s32 i = 0;
+ struct mem *m = NULL;
+
+ for (i = 0; i < MAX; i++) {
+ m = kmalloc(sizeof(struct mem), GFP_KERNEL);
+ if (!m)
+ return -ENOMEM;
+ memset(m, 0x0, sizeof(struct mem));
+
+ m->pg = alloc_page(GFP_KERNEL | GFP_DMA);
+ if (!m->pg) {
+ kfree(m);
+ return -ENOMEM;
+ }
+
+ m->pa = page_to_phys(m->pg);
+
+ /**
+ * Note: we need to flush the cache
+ * entry for each page we allocate.
+ */
+ dmac_flush_range((void *)page_address(m->pg),
+ (void *)page_address(m->pg) + DMM_PAGE);
+ outer_flush_range(m->pa, m->pa + DMM_PAGE);
+
+ mutex_lock(&mtx);
+ list_add(&m->list, &mem->list);
+ mutex_unlock(&mtx);
+ }
+ return 0x0;
+}
+
+static void dmm_free_page_stack(struct mem *mem)
+{
+ struct list_head *pos = NULL, *q = NULL;
+ struct mem *m = NULL;
+
+ mutex_lock(&mtx);
+ list_for_each_safe(pos, q, &mem->list) {
+ m = list_entry(pos, struct mem, list);
+ __free_page(m->pg);
+ list_del(pos);
+ kfree(m);
+ }
+ mutex_unlock(&mtx);
+}
+
+u32 dmm_get_page(void)
+{
+ struct list_head *pos = NULL, *q = NULL;
+ struct mem *m = NULL;
+
+ if (list_empty_careful(&free_list.list))
+ if (fill_page_stack(&free_list))
+ return 0;
+
+ mutex_lock(&mtx);
+ pos = NULL;
+ q = NULL;
+ list_for_each_safe(pos, q, &free_list.list) {
+ m = list_entry(pos, struct mem, list);
+ list_move(&m->list, &used_list.list);
+ break;
+ }
+ mutex_unlock(&mtx);
+
+ if (m != NULL)
+ return m->pa;
+ else
+ return 0x0;
+}
+EXPORT_SYMBOL(dmm_get_page);
+
+void dmm_free_page(u32 page_addr)
+{
+ struct list_head *pos = NULL, *q = NULL;
+ struct mem *m = NULL;
+
+ mutex_lock(&mtx);
+ pos = NULL;
+ q = NULL;
+ list_for_each_safe(pos, q, &used_list.list) {
+ m = list_entry(pos, struct mem, list);
+ if (m->pa == page_addr) {
+ list_move(&m->list, &free_list.list);
+ break;
+ }
+ }
+ mutex_unlock(&mtx);
+}
+EXPORT_SYMBOL(dmm_free_page);
+
+u32 dmm_init_mem(void)
+{
+ INIT_LIST_HEAD(&free_list.list);
+ INIT_LIST_HEAD(&used_list.list);
+ INIT_LIST_HEAD(&fast_list.list);
+ mutex_init(&mtx);
+
+ if (list_empty_careful(&free_list.list))
+ if (fill_page_stack(&free_list))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void dmm_release_mem(void)
+{
+ dmm_free_fast_list(&fast_list);
+ dmm_free_page_stack(&free_list);
+ dmm_free_page_stack(&used_list);
+ mutex_destroy(&mtx);
+}
+
+u32 *dmm_get_pages(s32 n)
+{
+ s32 i = 0;
+ struct list_head *pos = NULL, *q = NULL;
+ struct mem *m = NULL;
+ struct fast *f = NULL;
+
+ if (n <= 0 || n > 0x8000)
+ return NULL;
+
+ if (list_empty_careful(&free_list.list))
+ if (fill_page_stack(&free_list))
+ return NULL;
+
+ f = kmalloc(sizeof(struct fast), GFP_KERNEL);
+ if (!f)
+ return NULL;
+ memset(f, 0x0, sizeof(struct fast));
+
+ /* array of mem struct pointers */
+ f->mem = kmalloc(n * 4, GFP_KERNEL);
+ if (!f->mem) {
+ kfree(f); return NULL;
+ }
+ memset(f->mem, 0x0, n * 4);
+
+ /* array of physical addresses */
+ f->pa = kmalloc(n * 4, GFP_KERNEL);
+ if (!f->pa) {
+ kfree(f->mem); kfree(f); return NULL;
+ }
+ memset(f->pa, 0x0, n * 4);
+
+ /*
+ * store the number of mem structs so that we
+ * know how many to free later.
+ */
+ f->num = n;
+
+ for (i = 0; i < n; i++) {
+ if (list_empty_careful(&free_list.list))
+ if (fill_page_stack(&free_list))
+ goto cleanup;
+
+ mutex_lock(&mtx);
+ pos = NULL;
+ q = NULL;
+
+ /*
+ * remove one mem struct from the free list and
+ * add the address to the fast struct mem array
+ */
+ list_for_each_safe(pos, q, &free_list.list) {
+ m = list_entry(pos, struct mem, list);
+ f->mem[i] = (u32)m;
+ list_del(pos);
+ break;
+ }
+ mutex_unlock(&mtx);
+
+ if (m != NULL)
+ f->pa[i] = m->pa;
+ else
+ goto cleanup;
+ }
+
+ mutex_lock(&mtx);
+ list_add(&f->list, &fast_list.list);
+ mutex_unlock(&mtx);
+
+ if (f != NULL)
+ return f->pa;
+cleanup:
+ for (; i > 0; i--) {
+ mutex_lock(&mtx);
+ list_add(&((struct mem *)f->mem[i - 1])->list, &free_list.list);
+ mutex_unlock(&mtx);
+ }
+ kfree(f->pa);
+ kfree(f->mem);
+ kfree(f);
+ return NULL;
+}
+EXPORT_SYMBOL(dmm_get_pages);
+
+void dmm_free_pages(u32 *list)
+{
+ struct list_head *pos = NULL, *q = NULL;
+ struct fast *f = NULL;
+ s32 i = 0;
+
+ mutex_lock(&mtx);
+ pos = NULL;
+ q = NULL;
+ list_for_each_safe(pos, q, &fast_list.list) {
+ f = list_entry(pos, struct fast, list);
+ if (f->pa[0] == list[0]) {
+ for (i = 0; i < f->num; i++) {
+ list_add(&((struct mem *)f->mem[i])->list,
+ &free_list.list);
+ }
+ list_del(pos);
+ kfree(f->pa);
+ kfree(f->mem);
+ kfree(f);
+ break;
+ }
+ }
+ mutex_unlock(&mtx);
+}
+EXPORT_SYMBOL(dmm_free_pages);
+
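A note on the multi-page interface above: dmm_get_pages() returns an array of
physical page addresses and records the allocation in fast_list keyed by the
first address, so callers must hand the same, unmodified array back to
dmm_free_pages(). A minimal caller sketch:

#include <linux/errno.h>
#include "dmm.h"

static int dmm_pages_example(void)
{
	u32 *pa_list = dmm_get_pages(16);	/* 16 physical page addresses */

	if (!pa_list)
		return -ENOMEM;

	/* ... program the returned physical addresses into the PAT ... */

	dmm_free_pages(pa_list);	/* must be the pointer returned above */
	return 0;
}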
diff --git a/drivers/media/video/dmm/dmm_mem.h b/drivers/media/video/dmm/dmm_mem.h
new file mode 100644
index 000000000000..9a80df5e09b4
--- /dev/null
+++ b/drivers/media/video/dmm/dmm_mem.h
@@ -0,0 +1,30 @@
+/*
+ * dmm_mem.h
+ *
+ * DMM driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#ifndef DMM_MEM_H
+#define DMM_MEM_H
+
+/**
+ * Initialize the DMM page stacks.
+ * @return an error status.
+ */
+u32 dmm_init_mem(void);
+
+/**
+ * Free the DMM page stacks.
+ */
+void dmm_release_mem(void);
+
+#endif
diff --git a/drivers/media/video/omap/Kconfig b/drivers/media/video/omap/Kconfig
new file mode 100644
index 000000000000..141cffc9ed51
--- /dev/null
+++ b/drivers/media/video/omap/Kconfig
@@ -0,0 +1,9 @@
+config VIDEO_OMAP3_OUT
+ tristate "OMAP2/OMAP3/OMAP4 V4L2-DSS driver"
+ select VIDEOBUF_GEN
+ select VIDEOBUF_DMA_SG
+ select OMAP2_DSS
+ depends on VIDEO_DEV && (ARCH_OMAP24XX || ARCH_OMAP34XX || ARCH_OMAP4)
+ default y
+ ---help---
+ V4L2 DSS support for OMAP2/3/4 based boards.
diff --git a/drivers/media/video/omap/Makefile b/drivers/media/video/omap/Makefile
new file mode 100644
index 000000000000..d8c5ed329bb3
--- /dev/null
+++ b/drivers/media/video/omap/Makefile
@@ -0,0 +1,4 @@
+omap-vout-mod-objs := omap_vout.o omap_voutlib.o
+
+obj-$(CONFIG_VIDEO_OMAP3_OUT) += omap-vout-mod.o
+
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
new file mode 100755
index 000000000000..0dfe422036ce
--- /dev/null
+++ b/drivers/media/video/omap/omap_vout.c
@@ -0,0 +1,3182 @@
+/*
+ * drivers/media/video/omap/omap_vout.c
+ *
+ * Copyright (C) 2005-2009 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Leveraged code from the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * History:
+ * 20-APR-2006 Khasim Modified VRFB based Rotation,
+ * The image data is always read from 0 degree
+ * view and written
+ * to the virtual space of desired rotation angle
+ * 4-DEC-2006 Jian Changed to support better memory management
+ *
+ * 17-Nov-2008 Hardik Changed to use the new DSS patches by Tomi
+ * Changed driver to use video_ioctl2
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/videodev2.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+
+#include <media/videobuf-dma-sg.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+#include <asm/processor.h>
+#include <plat/dma.h>
+#include <plat/vram.h>
+#include <plat/vrfb.h>
+#include <plat/display.h>
+#include <plat/cpu.h>
+
+#include "omap_voutlib.h"
+#include "omap_voutdef.h"
+
+#ifdef CONFIG_ARCH_OMAP4 /* TODO: correct this!*/
+#include <mach/tiler.h>
+
+#endif
+
+#define TILER_ALLOCATE_V4L2
+
+MODULE_AUTHOR("Texas Instruments.");
+MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
+MODULE_LICENSE("GPL");
+
+#define OMAP_VIDEO1 0
+#define OMAP_VIDEO2 1
+#ifdef CONFIG_ARCH_OMAP4
+#define OMAP_VIDEO3 2
+#define OMAP4_MAX_OVERLAYS 4
+#endif
+
+/* configuration macros */
+#define VOUT_NAME "omap_vout"
+
+#define QQVGA_WIDTH 160
+#define QQVGA_HEIGHT 120
+
+#ifdef CONFIG_ARCH_OMAP4
+#define NUM_OF_VIDEO_CHANNELS 3
+#else
+#define NUM_OF_VIDEO_CHANNELS 2
+#endif
+
+#define VID_MAX_WIDTH 2048 /* Largest width */
+#define VID_MAX_HEIGHT 2048 /* Largest height */
+
+/* Minimum requirement is 2x2 for DSS */
+#define VID_MIN_WIDTH 2
+#define VID_MIN_HEIGHT 2
+
+/* 2048 x 2048 is max res supported by OMAP display controller */
+#define DMA_CHAN_ALLOTED 1
+#define DMA_CHAN_NOT_ALLOTED 0
+#define MAX_PIXELS_PER_LINE 2048
+#define VRFB_TX_TIMEOUT 1000
+
+/* Maximum buffer size: a full 2048x2048 frame at 4 bytes per pixel */
+#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
+
+static struct videobuf_queue_ops video_vbq_ops;
+
+static u32 video1_numbuffers = 3;
+static u32 video2_numbuffers = 3;
+static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+#ifdef CONFIG_ARCH_OMAP4
+static u32 video3_numbuffers = 3;
+static u32 video3_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+#endif
+static u32 vid1_static_vrfb_alloc;
+static u32 vid2_static_vrfb_alloc;
+static int debug;
+
+struct mutex my_lock;
+/* Module parameters */
+module_param(video1_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_numbuffers,
+ "Number of buffers to be allocated at init time for Video1 device.");
+
+module_param(video2_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_numbuffers,
+ "Number of buffers to be allocated at init time for Video2 device.");
+
+module_param(video1_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_bufsize,
+ "Size of the buffer to be allocated for video1 device");
+
+module_param(video2_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_bufsize,
+ "Size of the buffer to be allocated for video2 device");
+
+#ifdef CONFIG_ARCH_OMAP4
+module_param(video3_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video3_numbuffers, "Number of buffers to be allocated at \
+ init time for Video3 device.");
+
+module_param(video3_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video3_bufsize, "Size of the buffer to be allocated for \
+ video3 device");
+#endif
+
+module_param(vid1_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid1_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video1 device");
+
+module_param(vid2_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid2_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video2 device");
+
+module_param(debug, bool, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+/* Local Helper functions */
+static int omap_vout_create_video_devices(struct platform_device *pdev);
+static int omapvid_apply_changes(struct omap_vout_device *vout);
+static int omapvid_init(struct omap_vout_device *vout, u32 addr, u32 uv_addr);
+static int omapvid_setup_overlay(struct omap_vout_device *vout,
+ struct omap_overlay *ovl, int posx, int posy,
+ int outw, int outh, u32 addr, u32 uv_addr);
+static enum omap_color_mode video_mode_to_dss_mode(
+ struct v4l2_pix_format *pix);
+static void omap_vout_isr(void *arg, unsigned int irqstatus);
+static void omap_vout_cleanup_device(struct omap_vout_device *vout);
+/*
+ * Maximum amount of memory to use for rendering buffers.
+ * Default is enough for four (RGB24) DVI 720P buffers.
+ */
+#define MAX_ALLOWED_VIDBUFFERS 4
+
+/* list of image formats supported by OMAP2 video pipelines */
+static const struct v4l2_fmtdesc omap_formats[] = {
+ {
+ /* Note: V4L2 defines RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3
+ *
+ * We interpret RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3
+ */
+ .description = "RGB565, le",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ /* Note: V4L2 defines RGB32 as: RGB-8-8-8-8 we use
+ * this for RGB24 unpack mode, the last 8 bits are ignored
+ * */
+ .description = "RGB32, le",
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ },
+ {
+ /* Note: V4L2 defines RGB24 as: RGB-8-8-8 we use
+ * this for RGB24 packed mode
+ *
+ */
+ .description = "RGB24, le",
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ },
+ {
+ .description = "YUYV (YUV 4:2:2), packed",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .description = "UYVY, packed",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ },
+ {
+ .description = "NV12 - YUV420 format",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ },
+
+};
+
+#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
+
+#ifndef TILER_ALLOCATE_V4L2
+/* Allocate buffers */
+static unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
+{
+ unsigned long virt_addr, addr;
+ u32 size;
+
+ size = PAGE_ALIGN(buf_size);
+ virt_addr = (u32) alloc_pages_exact(size, GFP_KERNEL | GFP_DMA);
+ addr = virt_addr;
+ if (virt_addr) {
+ while (size > 0) {
+ SetPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+ *phys_addr = (u32) virt_to_phys((void *) virt_addr);
+ return virt_addr;
+}
+#endif
+
+/* Free buffers */
+static void omap_vout_free_buffer(unsigned long virtaddr, u32 phys_addr,
+ u32 buf_size)
+{
+ unsigned long addr = virtaddr;
+ u32 size;
+
+ size = PAGE_ALIGN(buf_size);
+ while (size > 0) {
+ ClearPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ free_pages_exact((void *) virtaddr, PAGE_ALIGN(buf_size));
+}
+
+#ifndef CONFIG_ARCH_OMAP4
+/* Function for allocating video buffers */
+static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
+ unsigned int *count, int startindex)
+{
+ int i, j;
+
+ for (i = 0; i < *count; i++) {
+ if (!vout->smsshado_virt_addr[i]) {
+ vout->smsshado_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->smsshado_size,
+ &vout->smsshado_phy_addr[i]);
+ }
+ if (!vout->smsshado_virt_addr[i] && startindex != -1) {
+ if (V4L2_MEMORY_MMAP == vout->memory
+ && i >= startindex)
+ break;
+ }
+ if (!vout->smsshado_virt_addr[i]) {
+ for (j = 0; j < i; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_phy_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ *count = 0;
+ return -ENOMEM;
+ }
+ memset((void *) vout->smsshado_virt_addr[i], 0,
+ vout->smsshado_size);
+ }
+ return 0;
+}
+#endif
+
+/* Try format */
+static int omap_vout_try_format(struct v4l2_pix_format *pix)
+{
+ int ifmt, bpp = 0;
+
+ pix->height = clamp(pix->height, (u32)VID_MIN_HEIGHT,
+ (u32)VID_MAX_HEIGHT);
+ pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH);
+
+ for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) {
+ if (pix->pixelformat == omap_formats[ifmt].pixelformat)
+ break;
+ }
+
+ if (ifmt == NUM_OUTPUT_FORMATS)
+ ifmt = 0;
+
+ pix->pixelformat = omap_formats[ifmt].pixelformat;
+ pix->field = V4L2_FIELD_ANY;
+ pix->priv = 0;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ default:
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ bpp = YUYV_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB565_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB24_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB32_BPP;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ bpp = 1; /* TODO: check this? */
+ break;
+ }
+
+ /* :NOTE: NV12 has width bytes per line in both Y and UV sections */
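+ /* e.g. a 640x480 NV12 request: bpp is 1, so bytesperline starts at 640
+ * (and is then page-aligned in the TILER case below), and sizeimage works
+ * out to bytesperline * 480 * 3 / 2 */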
+ pix->bytesperline = pix->width * bpp;
+
+#ifdef TILER_ALLOCATE_V4L2
+ pix->bytesperline = (pix->bytesperline + PAGE_SIZE - 1) &
+ ~(PAGE_SIZE - 1);
+#endif
+
+ /* :TODO: add 2-pixel round restrictions to YUYV and NV12 formats */
+ pix->sizeimage = pix->bytesperline * pix->height;
+ if (V4L2_PIX_FMT_NV12 == pix->pixelformat)
+ pix->sizeimage += pix->sizeimage >> 1;
+
+ return bpp;
+}
+
+/*
+ * omap_vout_uservirt_to_phys: This inline function converts a user-space
+ * virtual address to a physical address.
+ */
+static inline u32 omap_vout_uservirt_to_phys(u32 virtp)
+{
+ unsigned long physp = 0;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+
+ vma = find_vma(mm, virtp);
+ /* For kernel direct-mapped memory, take the easy way */
+ if (virtp >= PAGE_OFFSET) {
+ physp = virt_to_phys((void *) virtp);
+ } else if (vma && (vma->vm_flags & VM_IO)
+ && vma->vm_pgoff) {
+ /* this will catch kernel-allocated,
+ mmapped-to-usermode addresses */
+ physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
+ } else {
+ /* otherwise, use get_user_pages() for general userland pages */
+ int res, nr_pages = 1;
+ struct page *pages;
+ down_read(&current->mm->mmap_sem);
+
+ res = get_user_pages(current, current->mm, virtp, nr_pages,
+ 1, 0, &pages, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (res == nr_pages) {
+ physp = __pa(page_address(&pages[0]) +
+ (virtp & ~PAGE_MASK));
+ } else {
+ printk(KERN_WARNING VOUT_NAME
+ "get_user_pages failed\n");
+ return 0;
+ }
+ }
+
+ return physp;
+}
+
+#ifndef CONFIG_ARCH_OMAP4
+/* This function wakes up the application once
+ * the DMA transfer to VRFB space is completed. */
+static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+ struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
+
+ t->tx_status = 1;
+ wake_up_interruptible(&t->wait);
+}
+
+/* Release the VRFB context once the module exits */
+static void omap_vout_release_vrfb(struct omap_vout_device *vout)
+{
+#ifndef CONFIG_ARCH_OMAP4
+/* TODO: this is temporary disabling of vrfb to test V4L2: needs to be
+ corrected for future
+*/
+ int i;
+ for (i = 0; i < 4; i++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[i]);
+#endif
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ omap_free_dma(vout->vrfb_dma_tx.dma_ch);
+ }
+
+}
+#endif
+
+/* Return true if rotation is 90 or 270 */
+static inline int rotate_90_or_270(const struct omap_vout_device *vout)
+{
+ return (vout->rotation == dss_rotation_90_degree ||
+ vout->rotation == dss_rotation_270_degree);
+}
+
+/* Return true if rotation is enabled */
+static inline int rotation_enabled(const struct omap_vout_device *vout)
+{
+ return vout->rotation || vout->mirror;
+}
+
+/* Reverse the rotation degree if mirroring is enabled */
+static inline int calc_rotation(const struct omap_vout_device *vout)
+{
+#ifndef CONFIG_ARCH_OMAP4
+ if (!vout->mirror)
+ return vout->rotation;
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ return dss_rotation_270_degree;
+ case dss_rotation_270_degree:
+ return dss_rotation_90_degree;
+ case dss_rotation_180_degree:
+ return dss_rotation_0_degree;
+ default:
+ return dss_rotation_180_degree;
+ }
+#else
+ return vout->rotation;
+#endif
+}
+
+/* Free the V4L2 buffers */
+static void omap_vout_free_buffers(struct omap_vout_device *vout)
+{
+ int i, numbuffers;
+
+ /* Allocate memory for the buffers */
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_VIDEO3 == vout->vid) {
+ numbuffers = video3_numbuffers;
+ vout->buffer_size = video3_bufsize;
+ } else
+#endif
+ {
+ numbuffers = (vout->vid) ? video2_numbuffers
+ : video1_numbuffers;
+ vout->buffer_size = (vout->vid) ? video2_bufsize
+ : video1_bufsize;
+ }
+
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buf_phy_addr[i], vout->buffer_size);
+ vout->buf_phy_addr[i] = 0;
+ vout->buf_virt_addr[i] = 0;
+ }
+}
+
+#ifndef CONFIG_ARCH_OMAP4
+/* Free VRFB buffers */
+static void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
+{
+ int j;
+
+ for (j = 0; j < 4; j++) {
+ omap_vout_free_buffer(vout->smsshado_virt_addr[j],
+ vout->smsshado_phy_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+}
+
+/* Allocate the buffers for the VRFB space. Data is copied from V4L2
+ * buffers to the VRFB buffers using the DMA engine.*/
+static int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex)
+{
+ int i;
+ bool yuv_mode;
+
+ /* Allocate the VRFB buffers only if the buffers are not
+ * allocated during init time.
+ */
+ if ((rotation_enabled(vout)) &&
+ !vout->vrfb_static_allocation)
+ if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
+ return -ENOMEM;
+
+ if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
+ vout->dss_mode == OMAP_DSS_COLOR_UYVY)
+ yuv_mode = true;
+ else
+ yuv_mode = false;
+
+/* TODO: this is temporary disabling of vrfb to test V4L2: needs to be
+ corrected for future
+*/
+ for (i = 0; i < *count; i++) {
+ omap_vrfb_setup(&vout->vrfb_context[i],
+ vout->smsshado_phy_addr[i],
+ vout->pix.width, vout->pix.height,
+ vout->bpp, yuv_mode);
+ }
+ return 0;
+}
+#endif
+
+static void omap_vout_tiler_buffer_free(struct omap_vout_device *vout,
+ unsigned int count,
+ unsigned int startindex)
+{
+ int i;
+
+ if (startindex < 0)
+ startindex = 0;
+ if (startindex + count > VIDEO_MAX_FRAME)
+ count = VIDEO_MAX_FRAME - startindex;
+
+ for (i = startindex; i < startindex + count; i++) {
+ if (vout->buf_phy_addr_alloced[i])
+ tiler_free(vout->buf_phy_addr_alloced[i]);
+ if (vout->buf_phy_uv_addr_alloced[i])
+ tiler_free(vout->buf_phy_uv_addr_alloced[i]);
+ vout->buf_phy_addr[i] = 0;
+ vout->buf_phy_addr_alloced[i] = 0;
+ vout->buf_phy_uv_addr[i] = 0;
+ vout->buf_phy_uv_addr_alloced[i] = 0;
+ }
+}
+
+/* Allocate the buffers for TILER space. Ideally, the buffers will be ONLY
+ in tiler space, with different rotated views available simply through an address conversion.
+ */
+static int omap_vout_tiler_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex,
+ struct v4l2_pix_format *pix)
+{
+ int i, aligned = 1;
+ enum tiler_fmt fmt;
+
+ /* normalize buffers to allocate so we stay within bounds */
+ int start = (startindex < 0) ? 0 : startindex;
+ int n_alloc = (start + *count > VIDEO_MAX_FRAME)
+ ? VIDEO_MAX_FRAME - start : *count;
+ int bpp = omap_vout_try_format(pix);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "tiler buffer alloc:\n"
+ "count - %d, start -%d :\n", *count, startindex);
+
+ /* special allocation scheme for NV12 format */
+ if (OMAP_DSS_COLOR_NV12 == video_mode_to_dss_mode(pix)) {
+ tiler_alloc_packed_nv12(&n_alloc, pix->width,
+ pix->height,
+ (void **) vout->buf_phy_addr + start,
+ (void **) vout->buf_phy_uv_addr + start,
+ (void **) vout->buf_phy_addr_alloced + start,
+ (void **) vout->buf_phy_uv_addr_alloced + start,
+ aligned);
+ } else {
+ /* Only bpp of 1, 2, and 4 is supported by tiler */
+ fmt = (bpp == 1 ? TILFMT_8BIT :
+ bpp == 2 ? TILFMT_16BIT :
+ bpp == 4 ? TILFMT_32BIT : TILFMT_INVALID);
+ if (fmt == TILFMT_INVALID)
+ return -ENOMEM;
+
+ tiler_alloc_packed(&n_alloc, fmt, pix->width,
+ pix->height,
+ (void **) vout->buf_phy_addr + start,
+ (void **) vout->buf_phy_addr_alloced + start,
+ aligned);
+ }
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "allocated %d buffers\n", n_alloc);
+
+ if (n_alloc < *count) {
+ if (n_alloc && (startindex == -1 ||
+ V4L2_MEMORY_MMAP != vout->memory)) {
+ /* TODO: check this condition's logic */
+ omap_vout_tiler_buffer_free(vout, n_alloc, start);
+ *count = 0;
+ return -ENOMEM;
+ }
+ }
+
+ for (i = start; i < start + n_alloc; i++) {
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "y=%08lx (%d) uv=%08lx (%d)\n",
+ vout->buf_phy_addr[i],
+ vout->buf_phy_addr_alloced[i] ? 1 : 0,
+ vout->buf_phy_uv_addr[i],
+ vout->buf_phy_uv_addr_alloced[i] ? 1 : 0);
+ }
+
+ *count = n_alloc;
+
+ return 0;
+}
+
+#ifdef TILER_ALLOCATE_V4L2
+/* Free tiler buffers */
+static void omap_vout_free_tiler_buffers(struct omap_vout_device *vout)
+{
+ omap_vout_tiler_buffer_free(vout, vout->buffer_allocated, 0);
+ vout->buffer_allocated = 0;
+}
+#endif
+
+/* Convert V4L2 rotation to DSS rotation.
+ * V4L2 understands 0, 90, 180 and 270;
+ * convert these to 0, 1, 2 and 3 respectively for DSS */
+static int v4l2_rot_to_dss_rot(int v4l2_rotation, enum dss_rotation *rotation,
+ bool mirror)
+{
+ switch (v4l2_rotation) {
+ case 90:
+ *rotation = dss_rotation_90_degree;
+ return 0;
+ case 180:
+ *rotation = dss_rotation_180_degree;
+ return 0;
+ case 270:
+ *rotation = dss_rotation_270_degree;
+ return 0;
+ case 0:
+ *rotation = dss_rotation_0_degree;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+}
+
+/* Calculate the buffer offsets from which the streaming should
+ * start. This offset calculation is mainly required because of
+ * the VRFB 32-pixel alignment used with rotation.
+ */
+static int omap_vout_calculate_offset(struct omap_vout_device *vout, int idx)
+{
+ struct v4l2_pix_format *pix = &vout->pix;
+ struct v4l2_rect *crop = &vout->crop;
+ enum dss_rotation rotation;
+ int vr_ps = 1, ps = 2;
+#ifndef CONFIG_ARCH_OMAP4
+ int offset = 0, temp_ps = 2;
+ bool mirroring = vout->mirror;
+#endif
+ int ctop = 0, cleft = 0, line_length = 0;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ struct omap_dss_device *cur_display;
+ int *cropped_offset = vout->cropped_offset + idx;
+#ifdef CONFIG_ARCH_OMAP4
+ int *cropped_uv_offset = vout->cropped_uv_offset + idx;
+ unsigned long addr = 0, uv_addr = 0;
+#endif
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device)
+ return -1;
+ cur_display = ovl->manager->device;
+
+ rotation = calc_rotation(vout);
+
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat) {
+ if (rotation_enabled(vout)) {
+ /*
+ * ps - Actual pixel size for YUYV/UYVY for
+ * VRFB/Mirroring is 4 bytes
+ * vr_ps - Virtual pixel size for YUYV/UYVY is
+ * 2 bytes
+ */
+ ps = 4;
+ vr_ps = 2;
+ } else {
+ ps = 2; /* otherwise the pixel size is 2 bytes */
+ }
+ } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
+ ps = 4;
+ } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
+ ps = 3;
+ }
+ vout->ps = ps;
+ vout->vr_ps = vr_ps;
+ if (rotation_enabled(vout)) {
+ line_length = MAX_PIXELS_PER_LINE;
+ ctop = (pix->height - crop->height) - crop->top;
+ cleft = (pix->width - crop->width) - crop->left;
+ } else {
+ line_length = pix->width;
+ }
+ vout->line_length = line_length;
+#ifndef CONFIG_ARCH_OMAP4
+ switch (rotation) {
+ case dss_rotation_90_degree:
+ offset = vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * cleft + crop->top * temp_ps;
+ } else {
+ *cropped_offset = offset + line_length * temp_ps *
+ cleft + crop->top * temp_ps + (line_length *
+ ((crop->width / (vr_ps)) - 1) * ps);
+ }
+ break;
+ case dss_rotation_180_degree:
+ offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp) +
+ (vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp));
+ if (mirroring == 0) {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps;
+
+ } else {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps + (line_length *
+ (crop->height - 1) * ps);
+ }
+ break;
+ case dss_rotation_270_degree:
+ offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps;
+ } else {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps +
+ (line_length * ((crop->width / vr_ps) - 1) *
+ ps);
+ }
+ break;
+ case dss_rotation_0_degree:
+ if (mirroring == 0) {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps;
+ } else {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps +
+ (line_length * (crop->height - 1) * ps);
+ }
+ break;
+ default:
+ *cropped_offset = (line_length * ps * crop->top) /
+ vr_ps + (crop->left * ps) / vr_ps +
+ ((crop->width / vr_ps) - 1) * ps;
+ break;
+ }
+#else
+ /* :TODO: change v4l2 to send TSPtr as tiled addresses to DSS2 */
+ addr = tiler_get_natural_addr(vout->queued_buf_addr[idx]);
+
+ if (OMAP_DSS_COLOR_NV12 == vout->dss_mode) {
+ *cropped_offset = tiler_stride(addr) * crop->top + crop->left;
+ uv_addr = tiler_get_natural_addr(
+ vout->queued_buf_uv_addr[idx]);
+ /* :TODO: only allow even crops for NV12 */
+ *cropped_uv_offset = tiler_stride(uv_addr) * (crop->top >> 1)
+ + (crop->left & ~1);
+ } else {
+ *cropped_offset =
+ tiler_stride(addr) * crop->top + crop->left * ps;
+ }
+#endif
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "%s Offset:%x\n", __func__, *cropped_offset);
+ return 0;
+}
+
+/* convert V4L2 pixel format to DSS pixel format */
+static enum omap_color_mode video_mode_to_dss_mode(
+ struct v4l2_pix_format *pix)
+{
+
+ switch (pix->pixelformat) {
+#ifdef CONFIG_ARCH_OMAP4
+ case V4L2_PIX_FMT_NV12:
+ return OMAP_DSS_COLOR_NV12;
+#endif
+ case 0:
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ return OMAP_DSS_COLOR_YUV2;
+
+ case V4L2_PIX_FMT_UYVY:
+ return OMAP_DSS_COLOR_UYVY;
+
+ case V4L2_PIX_FMT_RGB565:
+ return OMAP_DSS_COLOR_RGB16;
+
+ case V4L2_PIX_FMT_RGB24:
+ return OMAP_DSS_COLOR_RGB24P;
+
+#ifdef CONFIG_ARCH_OMAP4
+ case V4L2_PIX_FMT_RGB32:
+ return OMAP_DSS_COLOR_ARGB32;
+#endif
+ case V4L2_PIX_FMT_BGR32:
+ return OMAP_DSS_COLOR_RGBX32;
+
+ default:
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+/* Helper function: for NV12, returns the UV buffer address given the single
+ * combined buffer address; the Y buffer address is the input itself.
+ * Used only in the non-TILER case.
+ */
+u32 omapvid_get_uvbase_nv12(u32 paddr, int height, int width)
+{
+ u32 puv_addr = 0;
+
+ puv_addr = (paddr + (height * width));
+ return puv_addr;
+}
+
+/* Setup the overlay */
+int omapvid_setup_overlay(struct omap_vout_device *vout,
+ struct omap_overlay *ovl, int posx, int posy, int outw,
+ int outh, u32 addr, u32 uv_addr)
+{
+ int r = 0;
+ enum dss_rotation rotation;
+ bool mirror;
+ int cropheight, cropwidth, pixheight, pixwidth;
+ struct omap_overlay_info info;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 &&
+ (outw != vout->pix.width || outh != vout->pix.height)) {
+ r = -EINVAL;
+ goto err;
+ }
+
+#ifndef CONFIG_ARCH_OMAP4
+ if (vout->pix.pixelformat == V4L2_PIX_FMT_RGB32) {
+ /* TODO: OMAP4: check this ??*/
+ vout->dss_mode = (vout->vid == OMAP_VIDEO1) ?
+ OMAP_DSS_COLOR_RGB24U : OMAP_DSS_COLOR_ARGB32;
+ } else
+#endif
+ vout->dss_mode = video_mode_to_dss_mode(&vout->pix);
+
+ if (vout->dss_mode == -EINVAL) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ rotation = vout->rotation;
+ mirror = vout->mirror;
+
+ /* Setup the input plane parameters according to
+ * rotation value selected.
+ */
+ if (rotate_90_or_270(vout)) {
+ cropheight = vout->crop.width;
+ cropwidth = vout->crop.height;
+ pixheight = vout->pix.width;
+ pixwidth = vout->pix.height;
+ } else {
+ cropheight = vout->crop.height;
+ cropwidth = vout->crop.width;
+ pixheight = vout->pix.height;
+ pixwidth = vout->pix.width;
+ }
+
+ ovl->get_overlay_info(ovl, &info);
+ if (addr)
+ info.paddr = addr;
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_COLOR_NV12 == vout->dss_mode)
+ info.p_uv_addr = uv_addr;
+ else
+ info.p_uv_addr = (u32) NULL;
+#endif
+ info.vaddr = NULL;
+ info.width = cropwidth;
+ info.height = cropheight;
+ info.color_mode = vout->dss_mode;
+ info.mirror = mirror;
+ info.pos_x = posx;
+ info.pos_y = posy;
+ info.out_width = outw;
+ info.out_height = outh;
+ info.global_alpha =
+ vout->vid_info.overlays[0]->info.global_alpha;
+#ifdef CONFIG_ARCH_OMAP4
+ info.rotation_type = OMAP_DSS_ROT_TILER;
+ info.screen_width = pixwidth;
+ info.rotation = vout->rotation;
+#else
+ if (!rotation_enabled(vout)) {
+ info.rotation = 0;
+ info.rotation_type = OMAP_DSS_ROT_DMA;
+ info.screen_width = pixwidth;
+ } else {
+ info.rotation = vout->rotation;
+ info.rotation_type = OMAP_DSS_ROT_VRFB;
+ info.screen_width = 2048;
+ }
+#endif
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "%s info.enable=%d info.addr=%x info.width=%d\n info.height=%d "
+ "info.color_mode=%d info.rotation=%d info.mirror=%d\n "
+ "info.posx=%d info.posy=%d info.out_width = %d info.out_height=%d\n "
+ "info.rotation_type=%d info.screen_width=%d\n", __func__, info.enabled,
+ info.paddr, info.width, info.height, info.color_mode, info.rotation,
+ info.mirror, info.pos_x, info.pos_y, info.out_width, info.out_height,
+ info.rotation_type, info.screen_width);
+
+#ifdef CONFIG_ARCH_OMAP4
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "info.puvaddr=%x\n",
+ info.p_uv_addr);
+#endif
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ goto err;
+
+ return 0;
+err:
+ printk(KERN_WARNING VOUT_NAME "setup_overlay failed\n");
+ return r;
+}
+
+/* Initialize the overlay structure */
+int omapvid_init(struct omap_vout_device *vout, u32 addr, u32 uv_addr)
+{
+ int r = 0;
+ struct omapvideo_info *ovid = &vout->vid_info;
+ struct omap_overlay *ovl;
+ int posx, posy;
+ int outw, outh, temp, rotation;
+ int i;
+ struct v4l2_window *win;
+ struct omap_video_timings *timing;
+
+ win = &vout->win;
+ rotation = vout->rotation;
+ for (i = 0; i < ovid->num_overlays; i++) {
+ ovl = ovid->overlays[i];
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+
+ timing = &ovl->manager->device->panel.timings;
+
+ outw = win->w.width;
+ outh = win->w.height;
+ posx = win->w.left;
+ posy = win->w.top;
+ switch (rotation) {
+ case dss_rotation_90_degree:
+ /* Invert the height and width for 90
+ * and 270 degree rotation
+ */
+ temp = outw;
+ outw = outh;
+ outh = temp;
+#ifndef CONFIG_ARCH_OMAP4
+ posy = (timing->y_res - win->w.width)-
+ win->w.left;
+ posx = win->w.top;
+#endif
+ break;
+
+ case dss_rotation_180_degree:
+#ifndef CONFIG_ARCH_OMAP4
+ posx = (timing->x_res - win->w.width) -
+ win->w.left;
+ posy = (timing->y_res - win->w.height) -
+ win->w.top;
+#endif
+ break;
+
+ case dss_rotation_270_degree:
+ temp = outw;
+ outw = outh;
+ outh = temp;
+#ifndef CONFIG_ARCH_OMAP4
+ posy = win->w.left;
+ posx = (timing->x_res - win->w.height)
+ - win->w.top;
+#endif
+ break;
+
+ default:
+ break;
+ }
+
+ r = omapvid_setup_overlay(vout, ovl, posx, posy, outw,
+ outh, addr, uv_addr);
+ if (r)
+ goto err;
+ }
+ return 0;
+err:
+ printk(KERN_WARNING VOUT_NAME "apply_changes failed\n");
+ return r;
+}
+
+/* Apply the changes and set the GO bit of the DSS */
+int omapvid_apply_changes(struct omap_vout_device *vout)
+{
+ struct omapvideo_info *ovid = &vout->vid_info;
+ struct omap_overlay *ovl;
+ int i;
+
+ for (i = 0; i < ovid->num_overlays; i++) {
+ ovl = ovid->overlays[i];
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+ ovl->manager->apply(ovl->manager);
+ }
+ return 0;
+
+}
+
+/* Video buffer callbacks */
+
+/* The buffer setup function is called by the videobuf layer when the REQBUF
+ * ioctl is called. It is used to set up buffers and return the size and
+ * count of the buffers allocated. After this call, the videobuf layer will
+ * set up the buffer queue depending on that size and count.
+ */
+static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
+ unsigned int *size)
+{
+ struct omap_vout_device *vout = q->priv_data;
+ int i;
+#ifndef TILER_ALLOCATE_V4L2
+ int startindex = 0, j;
+ u32 phy_addr = 0, virt_addr = 0;
+#endif
+ if (!vout)
+ return -EINVAL;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
+ return -EINVAL;
+
+#ifndef TILER_ALLOCATE_V4L2
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_VIDEO3 == vout->vid)
+ startindex = video3_numbuffers;
+ else
+#endif
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
+ *count = startindex;
+
+#ifndef CONFIG_ARCH_OMAP4
+ if ((rotation_enabled(vout))
+ && *count > 4)
+ *count = 4;
+
+ /* If rotation is enabled, allocate memory for VRFB space also */
+ if (rotation_enabled(vout)) {
+ if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
+ return -ENOMEM;
+ }
+#endif /* CONFIG_ARCH_OMAP4 */
+
+ /* Now allocate the V4L2 buffers */
+ *size = vout->buffer_size;
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_VIDEO3 == vout->vid)
+ startindex = video3_numbuffers;
+ else
+#endif
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ for (i = startindex; i < *count; i++) {
+ vout->buffer_size = *size;
+
+ virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
+ &phy_addr);
+ if (!virt_addr) {
+ if (!rotation_enabled(vout))
+ break;
+ /* Free the VRFB buffers if no space for V4L2 buffers */
+ for (j = i; j < *count; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_phy_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ }
+ vout->buf_virt_addr[i] = virt_addr;
+ vout->buf_phy_addr[i] = phy_addr;
+ }
+ *count = vout->buffer_allocated = i;
+
+#else /* TILER_ALLOCATE_V4L2 */
+
+ /* tiler_alloc_buf is to be called here.
+ * Prerequisites: rotation and format; based on those
+ * the buffers will be allocated.
+ */
+ /* Now allocate the V4L2 buffers */
+ /* i is the block width - either 4K or 8K, depending upon the input width */
+ i = (vout->pix.width * vout->bpp +
+ TILER_PAGE - 1) & ~(TILER_PAGE - 1);
+
+ /* for NV12 format, buffer is height + height / 2*/
+ if (OMAP_DSS_COLOR_NV12 == vout->dss_mode)
+ *size = vout->buffer_size = (vout->pix.height * 3/2 * i);
+ else
+ *size = vout->buffer_size = (vout->pix.height * i);
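+ /* Worked example, assuming TILER_PAGE is the 4 KiB TILER page size:
+ * a 640-pixel-wide RGB565 frame needs 640 * 2 = 1280 bytes per line,
+ * which rounds i up to 4096; a 480-line frame then reserves
+ * 480 * 4096 bytes, while an NV12 frame of the same size reserves
+ * (480 * 3/2) * 4096 bytes to cover the chroma plane as well.
+ */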
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "\nheight=%d, size = %d, vout->buffer_sz=%d\n",
+ vout->pix.height, *size, vout->buffer_size);
+ if (omap_vout_tiler_buffer_setup(vout, count, 0, &vout->pix))
+ return -ENOMEM;
+#endif /* TILER_ALLOCATE_V4L2 */
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return 0;
+
+ return 0;
+}
+
+#ifndef TILER_ALLOCATE_V4L2
+/* Free the V4L2 buffers allocated beyond the default
+ * number of buffers and free all the VRFB buffers */
+static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
+{
+ int num_buffers = 0, i;
+
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_VIDEO3 == vout->vid)
+ num_buffers = video3_numbuffers;
+ else
+#endif
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ if (vout->buf_virt_addr[i]) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buf_phy_addr[i], vout->buffer_size);
+ }
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ /* Free the VRFB buffers only if they are allocated
+ * during reqbufs. Don't free if init time allocated
+ */
+ if (!vout->vrfb_static_allocation) {
+ for (i = 0; i < 4; i++) {
+ if (vout->smsshado_virt_addr[i]) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[i],
+ vout->smsshado_phy_addr[i],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[i] = 0;
+ vout->smsshado_phy_addr[i] = 0;
+ }
+ }
+ }
+ vout->buffer_allocated = num_buffers;
+}
+#endif
+
+/* This function will be called when the VIDIOC_QBUF ioctl is called.
+ * It prepares buffers before they are given out for display. This function
+ * converts the user space virtual address into a physical address if the
+ * userptr memory exchange mechanism is used. If rotation is enabled, it copies
+ * the entire buffer into the VRFB memory space before giving it to the DSS.
+ */
+static int omap_vout_buffer_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct omap_vout_device *vout = q->priv_data;
+ struct videobuf_dmabuf *dmabuf = NULL;
+
+#ifndef CONFIG_ARCH_OMAP4
+ u32 dest_frame_index = 0, src_element_index = 0;
+ u32 dest_element_index = 0, src_frame_index = 0;
+ u32 elem_count = 0, frame_count = 0, pixsize = 2;
+ enum dss_rotation rotation;
+ struct vid_vrfb_dma *tx;
+#endif
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = vout->pix.width;
+ vb->height = vout->pix.height;
+ vb->size = vb->width * vb->height * vout->bpp;
+ vb->field = field;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+
+#ifndef TILER_ALLOCATE_V4L2
+ /* if user pointer memory mechanism is used, get the physical
+ * address of the buffer
+ */
+ if (V4L2_MEMORY_USERPTR == vb->memory) {
+ if (0 == vb->baddr)
+ return -EINVAL;
+ /* Virtual address */
+ /* priv points to struct videobuf_pci_sg_memory. But we want a
+ * pointer to videobuf_dmabuf, which is a member of
+ * videobuf_pci_sg_memory */
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ dmabuf->vmalloc = (void *) vb->baddr;
+
+ /* Physical address */
+ dmabuf->bus_addr =
+ (dma_addr_t) omap_vout_uservirt_to_phys(vb->baddr);
+ }
+
+ if (!rotation_enabled(vout)) {
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+
+ vout->queued_buf_addr[vb->i] = (u8 *) dmabuf->bus_addr;
+ return 0;
+ }
+#ifndef CONFIG_ARCH_OMAP4
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ /* If rotation is enabled, copy input buffer into VRFB
+ * memory space using DMA. We are copying input buffer
+ * into VRFB memory space of desired angle and DSS will
+ * read image VRFB memory for 0 degree angle
+ */
+ pixsize = vout->bpp * vout->vrfb_bpp;
+ /*
+ * DMA transfer in double index mode
+ */
+
+ /* Frame index */
+ dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
+ (vout->pix.width * vout->bpp)) + 1;
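+ /* In OMAP double-indexed DMA the address advances by the element size
+ * plus (index - 1), so a destination element index of 1 keeps the
+ * elements of one line contiguous, while the frame index above skips
+ * the unused remainder of each VRFB line (MAX_PIXELS_PER_LINE wide)
+ * before the next line starts.
+ */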
+
+ /* Source and destination parameters */
+ src_element_index = 0;
+ src_frame_index = 0;
+ dest_element_index = 1;
+ /* Number of elements per frame */
+ elem_count = vout->pix.width * vout->bpp;
+ frame_count = vout->pix.height;
+ tx = &vout->vrfb_dma_tx;
+ tx->tx_status = 0;
+ omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
+ tx->dev_id, 0x0);
+ /* src_port required only for OMAP1 */
+ omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dmabuf->bus_addr, src_element_index, src_frame_index);
+ /*set dma source burst mode for VRFB */
+ omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ rotation = calc_rotation(vout);
+
+ /* dest_port required only for OMAP1 */
+ omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
+ vout->vrfb_context[vb->i].paddr[0], dest_element_index,
+ dest_frame_index);
+ /*set dma dest burst mode for VRFB */
+ omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+
+ omap_start_dma(tx->dma_ch);
+ interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
+
+ if (tx->tx_status == 0) {
+ omap_stop_dma(tx->dma_ch);
+ return -EINVAL;
+ }
+ /* Store buffers physical address into an array. Addresses
+ * from this array will be used to configure DSS */
+ vout->queued_buf_addr[vb->i] = (u8 *)
+ vout->vrfb_context[vb->i].paddr[rotation];
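+ /* vrfb_context[i].paddr[] holds the four rotated views of the same
+ * VRFB region; handing DSS the view matching the requested angle lets
+ * it scan the frame as if it were unrotated.
+ */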
+#endif
+#else /* TILER to be used */
+
+ /* Here, we need to use the physical addresses given by Tiler:
+ */
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ vout->queued_buf_addr[vb->i] = (u8 *) dmabuf->bus_addr;
+ vout->queued_buf_uv_addr[vb->i] = (u8 *) dmabuf->vmalloc;
+
+#endif
+ return 0;
+}
+
+/* Buffer queue function will be called from the videobuf layer when the _QBUF
+ * ioctl is called. It is used to enqueue a buffer, which is ready to be
+ * displayed. */
+static void omap_vout_buffer_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+ /* The driver is also maintaining a queue. So enqueue the buffer in the
+ * driver queue */
+ list_add_tail(&vb->queue, &vout->dma_queue);
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+/* Buffer release function is called from the videobuf layer to release
+ * buffers which are already allocated */
+static void omap_vout_buffer_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return;
+}
+
+/*
+ * File operations
+ */
+static void omap_vout_vm_open(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count++;
+}
+
+static void omap_vout_vm_close(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count--;
+}
+
+static struct vm_operations_struct omap_vout_vm_ops = {
+ .open = omap_vout_vm_open,
+ .close = omap_vout_vm_close,
+};
+
+static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = file->private_data;
+ struct videobuf_queue *q = &vout->vbq;
+#ifndef TILER_ALLOCATE_V4L2
+ unsigned long size = (vma->vm_end - vma->vm_start);
+ unsigned long start = vma->vm_start;
+ struct page *cpage;
+#endif
+ int i;
+ void *pos;
+ struct videobuf_dmabuf *dmabuf = NULL;
+
+ int j = 0, k = 0, m = 0, p = 0, m_increment = 0;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ " %s pgoff=0x%lx, start=0x%lx, end=0x%lx\n", __func__,
+ vma->vm_pgoff, vma->vm_start, vma->vm_end);
+
+ /* look for the buffer to map */
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
+ continue;
+ if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ break;
+ }
+
+ if (VIDEO_MAX_FRAME == i) {
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
+ return -EINVAL;
+ }
+ q->bufs[i]->baddr = vma->vm_start;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &omap_vout_vm_ops;
+ vma->vm_private_data = (void *) vout;
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+
+ pos = (void *)(dmabuf->bus_addr);
+#ifndef TILER_ALLOCATE_V4L2
+ while (size > 0) {
+ cpage = pfn_to_page(((unsigned int) pos) >> PAGE_SHIFT);
+ if (vm_insert_page(vma, start, cpage)) {
+ printk(KERN_ERR "vout_mmap: Failed to insert bus_addr"
+ "page to VMA \n");
+ return -EAGAIN;
+ }
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+#else /* Tiler remapping */
+ pos = (void *) dmabuf->bus_addr;
+ /* get line width */
+ /* for NV12, Y buffer is 1bpp*/
+ if (OMAP_DSS_COLOR_NV12 == vout->dss_mode) {
+ p = (vout->pix.width +
+ TILER_PAGE - 1) & ~(TILER_PAGE - 1);
+ m_increment = 64 * TILER_WIDTH;
+ } else {
+ p = (vout->pix.width * vout->bpp +
+ TILER_PAGE - 1) & ~(TILER_PAGE - 1);
+
+ if (vout->bpp > 1)
+ m_increment = 2*64*TILER_WIDTH;
+ else
+ m_increment = 64 * TILER_WIDTH;
+ }
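+ /* Mapping sketch (exact values depend on the TILER configuration):
+ * p is the page-aligned number of bytes userspace sees per line, while
+ * m_increment is the stride between consecutive lines inside the TILER
+ * container (64 * TILER_WIDTH bytes for the 8-bit container, twice
+ * that for 16/32-bit formats). Each loop iteration below maps one
+ * video line at vm_start + k onto the TILER address pos + m.
+ */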
+
+ for (j = 0; j < vout->pix.height; j++) {
+ /* map each page of the line */
+ #if 0
+ if (0)
+ printk(KERN_NOTICE
+ "Y buffer %s::%s():%d: vm_start+%d = 0x%lx,"
+ "dma->vmalloc+%d = 0x%lx, w=0x%x\n",
+ __FILE__, __func__, __LINE__,
+ k, vma->vm_start + k, m,
+ (pos + m), p);
+ #endif
+ vma->vm_pgoff =
+ ((unsigned long)pos + m) >> PAGE_SHIFT;
+
+ if (remap_pfn_range(vma, vma->vm_start + k,
+ ((unsigned long)pos + m) >> PAGE_SHIFT,
+ p, vma->vm_page_prot))
+ return -EAGAIN;
+ k += p;
+ m += m_increment;
+ }
+ m = 0;
+
+ /* UV Buffer in case of NV12 format */
+ if (OMAP_DSS_COLOR_NV12 == vout->dss_mode) {
+ pos = dmabuf->vmalloc;
+ /* UV buffer is 2 bpp, but half size, so p remains */
+ m_increment = 2*64*TILER_WIDTH;
+
+ /* UV buffer is height / 2*/
+ for (j = 0; j < vout->pix.height / 2; j++) {
+ /* map each page of the line */
+ #if 0
+ if (0)
+ printk(KERN_NOTICE
+ "UV buffer %s::%s():%d: vm_start+%d = 0x%lx,"
+ "dma->vmalloc+%d = 0x%lx, w=0x%x\n",
+ __FILE__, __func__, __LINE__,
+ k, vma->vm_start + k, m,
+ (pos + m), p);
+ #endif
+ vma->vm_pgoff =
+ ((unsigned long)pos + m) >> PAGE_SHIFT;
+
+ if (remap_pfn_range(vma, vma->vm_start + k,
+ ((unsigned long)pos + m) >> PAGE_SHIFT,
+ p, vma->vm_page_prot))
+ return -EAGAIN;
+ k += p;
+ m += m_increment;
+ }
+ }
+
+#endif
+ vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
+ vout->mmap_count++;
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return 0;
+}
+
+static int omap_vout_release(struct file *file)
+{
+
+ struct omap_vout_device *vout = file->private_data;
+ struct videobuf_queue *q;
+ unsigned int t;
+ struct omapvideo_info *ovid;
+ unsigned int r;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+ ovid = &vout->vid_info;
+
+ if (!vout)
+ return 0;
+ q = &vout->vbq;
+
+ /* Disable all the overlay managers connected with this interface */
+ for (t = 0; t < ovid->num_overlays; t++) {
+ struct omap_overlay *ovl = ovid->overlays[t];
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 0;
+ ovl->set_overlay_info(ovl, &info);
+ }
+
+ }
+ /* Turn off the pipeline */
+ r = omapvid_apply_changes(vout);
+ if (r)
+ printk(KERN_WARNING VOUT_NAME "Unable to apply changes\n");
+
+ /* Free all buffers */
+
+#ifndef TILER_ALLOCATE_V4L2
+ omap_vout_free_allbuffers(vout);
+#else
+ omap_vout_free_tiler_buffers(vout);
+#endif
+
+ videobuf_mmap_free(q);
+
+ /* Even if apply changes fails we should continue
+ freeing allocated memory */
+ if (vout->streaming) {
+ u32 mask = 0;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_FRAMEDONE |
+ DISPC_IRQ_FRAMEDONE2 | DISPC_IRQ_VSYNC2;
+
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+ vout->streaming = 0;
+
+ videobuf_streamoff(q);
+ videobuf_queue_cancel(q);
+
+ }
+
+ if (vout->mmap_count != 0)
+ vout->mmap_count = 0;
+
+ vout->opened -= 1;
+ file->private_data = NULL;
+
+ if (vout->buffer_allocated)
+ videobuf_mmap_free(q);
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return r;
+}
+
+static int omap_vout_open(struct file *file)
+{
+ struct omap_vout_device *vout = NULL;
+ struct videobuf_queue *q;
+
+ vout = video_drvdata(file);
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+
+ if (vout == NULL)
+ return -ENODEV;
+
+ /* for now, we only support single open */
+ if (vout->opened)
+ return -EBUSY;
+
+ vout->opened += 1;
+
+ file->private_data = vout;
+ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ q = &vout->vbq;
+ video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+ video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+ video_vbq_ops.buf_release = omap_vout_buffer_release;
+ video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+ spin_lock_init(&vout->vbq_lock);
+
+ videobuf_queue_sg_init(q, &video_vbq_ops, NULL, &vout->vbq_lock,
+ vout->type, V4L2_FIELD_NONE, sizeof
+ (struct videobuf_buffer), vout);
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return 0;
+}
+
+/* V4L2 ioctls */
+static int vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct omap_vout_device *vout = fh;
+
+ strlcpy(cap->driver, VOUT_NAME,
+ sizeof(cap->driver));
+ strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
+ cap->bus_info[0] = '\0';
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ int index = fmt->index;
+ enum v4l2_buf_type type = fmt->type;
+
+ fmt->index = index;
+ fmt->type = type;
+ if (index >= NUM_OUTPUT_FORMATS)
+ return -EINVAL;
+
+ fmt->flags = omap_formats[index].flags;
+ strlcpy(fmt->description, omap_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = omap_formats[index].pixelformat;
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_vout_device *vout = fh;
+
+ f->fmt.pix = vout->pix;
+ return 0;
+
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ struct omap_video_timings *timing;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+ /* get the display device attached to the overlay */
+ timing = &ovl->manager->device->panel.timings;
+
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+
+ omap_vout_try_format(&f->fmt.pix);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_vout_device *vout = fh;
+ int bpp;
+ int r;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ struct omap_video_timings *timing;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device) {
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+ timing = &ovl->manager->device->panel.timings;
+
+/* TODO: check if TILER ADAPTATION is needed here. */
+ /* We don't support RGB24-packed mode if VRFB rotation
+ * is enabled */
+ if ((rotation_enabled(vout)) &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+
+ /* get the framebuffer parameters */
+
+#ifndef CONFIG_ARCH_OMAP4
+ if (rotate_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+#endif
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+#ifndef CONFIG_ARCH_OMAP4
+ }
+#endif
+ /* changing to a smaller size is OK */
+
+ bpp = omap_vout_try_format(&f->fmt.pix);
+ if (V4L2_PIX_FMT_NV12 == f->fmt.pix.pixelformat)
+ f->fmt.pix.sizeimage = f->fmt.pix.width *
+ f->fmt.pix.height * 3/2;
+ else
+ f->fmt.pix.sizeimage = f->fmt.pix.width *
+ f->fmt.pix.height * bpp;
+
+ /* try & set the new output format */
+ vout->bpp = bpp;
+ vout->pix = f->fmt.pix;
+ vout->vrfb_bpp = 1;
+
+ /* If YUYV then vrfb bpp is 2, for others it is 1 */
+ if (V4L2_PIX_FMT_YUYV == vout->pix.pixelformat ||
+ V4L2_PIX_FMT_UYVY == vout->pix.pixelformat)
+ vout->vrfb_bpp = 2;
+
+ /* set default crop and win */
+ omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);
+
+ /* Save the changes in the overlay structure */
+ r = omapvid_init(vout, 0, 0);
+ if (r) {
+ printk(KERN_ERR VOUT_NAME "failed to change mode\n");
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&vout->lock);
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int err = -EINVAL;
+ struct omap_vout_device *vout = fh;
+ struct v4l2_window *win = &f->fmt.win;
+
+ err = omap_vout_try_window(&vout->fbuf, win);
+
+ if (err)
+ return err;
+
+ if (vout->vid == OMAP_VIDEO1)
+ win->global_alpha = 255;
+ else
+ win->global_alpha = f->fmt.win.global_alpha;
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_vout_device *vout = fh;
+ int err = -EINVAL;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct v4l2_window *win = &f->fmt.win;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ err = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
+ if (err) {
+ mutex_unlock(&vout->lock);
+ return err;
+ }
+ /* Video1 plane does not support global alpha for OMAP 2/3 */
+ if ((cpu_is_omap24xx() || cpu_is_omap34xx()) &&
+ ovl->id == OMAP_DSS_VIDEO1)
+ vout->win.global_alpha = 255;
+ else
+ vout->win.global_alpha = f->fmt.win.global_alpha;
+
+ vout->win.chromakey = f->fmt.win.chromakey;
+ mutex_unlock(&vout->lock);
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ int index = fmt->index;
+ enum v4l2_buf_type type = fmt->type;
+
+ fmt->index = index;
+ fmt->type = type;
+ if (index >= NUM_OUTPUT_FORMATS)
+ return -EINVAL;
+
+ fmt->flags = omap_formats[index].flags;
+ strlcpy(fmt->description, omap_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = omap_formats[index].pixelformat;
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_overlay_manager_info info;
+ struct v4l2_window *win = &f->fmt.win;
+ u32 key_value = 0;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ win->w = vout->win.w;
+ win->field = vout->win.field;
+ win->global_alpha = vout->win.global_alpha;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ key_value = info.trans_key;
+ }
+ win->chromakey = key_value;
+ return 0;
+}
+
+static int vidioc_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cropcap)
+{
+ struct omap_vout_device *vout = fh;
+ enum v4l2_buf_type type = cropcap->type;
+ struct v4l2_pix_format *pix = &vout->pix;
+
+ cropcap->type = type;
+ if (type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ /* Width and height are always even */
+ cropcap->bounds.width = pix->width & ~1;
+ cropcap->bounds.height = pix->height & ~1;
+
+ omap_vout_default_crop(&vout->pix, &vout->fbuf, &cropcap->defrect);
+ cropcap->pixelaspect.numerator = 1;
+ cropcap->pixelaspect.denominator = 1;
+ return 0;
+}
+
+static int vidioc_g_crop(struct file *file, void *fh,
+ struct v4l2_crop *crop)
+{
+ struct omap_vout_device *vout = fh;
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ crop->c = vout->crop;
+ return 0;
+}
+
+static int vidioc_s_crop(struct file *file, void *fh,
+ struct v4l2_crop *crop)
+{
+ struct omap_vout_device *vout = fh;
+ int err = -EINVAL;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ struct omap_video_timings *timing;
+
+ /* Currently we only allow changing the crop position while
+ streaming. */
+ if (vout->streaming &&
+ (crop->c.height != vout->crop.height ||
+ crop->c.width != vout->crop.width))
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ if (!ovl->manager || !ovl->manager->device) {
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+ /* get the display device attached to the overlay */
+ timing = &ovl->manager->device->panel.timings;
+
+ if (rotate_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+ if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ err = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
+ &vout->fbuf, &crop->c);
+ mutex_unlock(&vout->lock);
+ return err;
+ } else {
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+}
+
+static int vidioc_queryctrl(struct file *file, void *fh,
+ struct v4l2_queryctrl *ctrl)
+{
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE:
+ v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
+ break;
+ case V4L2_CID_BG_COLOR:
+ v4l2_ctrl_query_fill(ctrl, 0, 0xFFFFFF, 1, 0);
+ break;
+ case V4L2_CID_VFLIP:
+ v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
+ break;
+ default:
+ ctrl->name[0] = '\0';
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *ctrl)
+{
+ struct omap_vout_device *vout = fh;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE:
+ ctrl->value = vout->control[0].value;
+ return 0;
+ case V4L2_CID_BG_COLOR:
+ {
+ struct omap_overlay_manager_info info;
+ struct omap_overlay *ovl;
+ ovl = vout->vid_info.overlays[0];
+
+ if (!ovl->manager || !ovl->manager->get_manager_info)
+ return -EINVAL;
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ ctrl->value = info.default_color;
+ return 0;
+ }
+
+ case V4L2_CID_VFLIP:
+ ctrl->value = vout->control[2].value;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+ struct omap_vout_device *vout = fh;
+
+ switch (a->id) {
+ case V4L2_CID_ROTATE:
+ {
+ int rotation = a->value;
+
+ mutex_lock(&vout->lock);
+
+ if (rotation &&
+ vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+
+ if ((v4l2_rot_to_dss_rot(rotation, &vout->rotation,
+ vout->mirror))) {
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+
+ vout->control[0].value = rotation;
+ mutex_unlock(&vout->lock);
+ return 0;
+ }
+ case V4L2_CID_BG_COLOR:
+ {
+ unsigned int color = a->value;
+ struct omap_overlay_manager_info info;
+ struct omap_overlay *ovl;
+ ovl = vout->vid_info.overlays[0];
+
+ mutex_lock(&vout->lock);
+ if (!ovl->manager || !ovl->manager->get_manager_info) {
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.default_color = color;
+ if (ovl->manager->set_manager_info(ovl->manager, &info)) {
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+
+ vout->control[1].value = color;
+ mutex_unlock(&vout->lock);
+ return 0;
+ }
+ case V4L2_CID_VFLIP:
+ {
+ unsigned int mirror = a->value;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ mutex_lock(&vout->lock);
+
+ if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+ vout->mirror = mirror;
+ vout->control[2].value = mirror;
+ mutex_unlock(&vout->lock);
+ return 0;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+}
+
+static int vidioc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ unsigned int i;
+#ifndef TILER_ALLOCATE_V4L2
+ unsigned int num_buffers = 0;
+#endif
+ int ret = 0;
+ struct videobuf_dmabuf *dmabuf = NULL;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "entered REQbuf: \n");
+
+ if ((req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) || (req->count < 0))
+ return -EINVAL;
+ /* if memory is not mmap or userptr
+ return error */
+ if ((V4L2_MEMORY_MMAP != req->memory) &&
+ (V4L2_MEMORY_USERPTR != req->memory))
+ return -EINVAL;
+
+ mutex_lock(&vout->lock);
+ /* Cannot be requested when streaming is on */
+ if (vout->streaming) {
+ mutex_unlock(&vout->lock);
+ return -EBUSY;
+ }
+
+ /* If buffers are already allocated free them */
+ if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
+ if (vout->mmap_count) {
+ mutex_unlock(&vout->lock);
+ return -EBUSY;
+ }
+
+#ifndef TILER_ALLOCATE_V4L2
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_VIDEO3 == vout->vid)
+ num_buffers = video3_numbuffers;
+ else
+#endif
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+ omap_vout_free_buffer((u32)dmabuf->vmalloc,
+ dmabuf->bus_addr, vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ vout->buffer_allocated = num_buffers;
+#else /* TILER_ALLOCATE_V4L2*/
+ omap_vout_tiler_buffer_free(vout, vout->buffer_allocated, 0);
+ vout->buffer_allocated = 0;
+
+#endif /* TILER_ALLOCATE_V4L2*/
+ videobuf_mmap_free(q);
+
+
+ } else if (q->bufs[0] && (V4L2_MEMORY_USERPTR == q->bufs[0]->memory)) {
+ if (vout->buffer_allocated) {
+ videobuf_mmap_free(q);
+ for (i = 0; i < vout->buffer_allocated; i++) {
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+ vout->buffer_allocated = 0;
+ }
+ }
+ /* store the memory type in the data structure */
+ vout->memory = req->memory;
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+
+ /* call videobuf_reqbufs api */
+ ret = videobuf_reqbufs(q, req);
+ if (ret < 0) {
+ mutex_unlock(&vout->lock);
+ return ret;
+ }
+
+ vout->buffer_allocated = req->count;
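+ /* For every buffer, stash the addresses in the videobuf DMA
+ * descriptor: bus_addr carries the base (Y) physical address and, for
+ * NV12 on OMAP4, the vmalloc field is reused to carry the UV plane
+ * address so that buffer_prepare can pick both up later.
+ */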
+ for (i = 0; i < req->count; i++) {
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+#ifdef CONFIG_ARCH_OMAP4
+ if (V4L2_PIX_FMT_NV12 == vout->pix.pixelformat) {
+#ifndef TILER_ALLOCATE_V4L2
+ dmabuf->vmalloc = (void *) omapvid_get_uvbase_nv12(
+ vout->buf_phy_addr[i],
+ vout->pix.height,
+ vout->pix.width);
+#else
+ dmabuf->vmalloc = (void *) vout->buf_phy_uv_addr[i];
+#endif /* TILER_ALLOCATE_V4L2 */
+ } else
+ dmabuf->vmalloc = NULL;
+#else
+ dmabuf->vmalloc = (void *) vout->buf_virt_addr[i];
+#endif
+ dmabuf->bus_addr = (dma_addr_t) vout->buf_phy_addr[i];
+ dmabuf->sglen = 1;
+ }
+ mutex_unlock(&vout->lock);
+ return 0;
+}
+
+static int vidioc_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+
+ return videobuf_querybuf(&vout->vbq, b);
+}
+
+static int vidioc_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buffer)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "entered qbuf: buffer address: %x \n", (unsigned int) buffer);
+
+ if ((V4L2_BUF_TYPE_VIDEO_OUTPUT != buffer->type) ||
+ (buffer->index >= vout->buffer_allocated) ||
+ (q->bufs[buffer->index]->memory != buffer->memory)) {
+ return -EINVAL;
+ }
+ if (V4L2_MEMORY_USERPTR == buffer->memory) {
+ if ((buffer->length < vout->pix.sizeimage) ||
+ (0 == buffer->m.userptr)) {
+ return -EINVAL;
+ }
+ }
+
+#ifndef CONFIG_ARCH_OMAP4
+ if ((rotation_enabled(vout)) &&
+ vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
+ printk(KERN_WARNING VOUT_NAME
+ "DMA Channel not allocated for Rotation\n");
+ return -EINVAL;
+ }
+#endif
+ ret = videobuf_qbuf(q, buffer);
+ /* record buffer offset from crop window */
+ if (omap_vout_calculate_offset(vout, buffer->index)) {
+ printk(KERN_ERR "Could not calculate buffer offset\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_dqbuf(struct file *file, void *fh,
+ struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "entered DQbuf: buffer address: %x \n", (unsigned int) b);
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ if (file->f_flags & O_NONBLOCK)
+ /* Call videobuf_dqbuf for non blocking mode */
+ ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
+ else
+ /* Call videobuf_dqbuf for blocking mode */
+ ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+ return ret;
+}
+
+static int vidioc_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type i)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ u32 addr = 0, uv_addr = 0;
+ int r = 0;
+ int t;
+ struct omapvideo_info *ovid = &vout->vid_info;
+ u32 mask = 0;
+
+ mutex_lock(&vout->lock);
+
+ if (vout->streaming) {
+ mutex_unlock(&vout->lock);
+ return -EBUSY;
+ }
+
+ r = videobuf_streamon(q);
+ if (r < 0) {
+ mutex_unlock(&vout->lock);
+ return r;
+ }
+
+ if (list_empty(&vout->dma_queue)) {
+ mutex_unlock(&vout->lock);
+ return -EIO;
+ }
+ /* Get the next frame from the buffer queue */
+ vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ /* Remove buffer from the buffer queue */
+ list_del(&vout->cur_frm->queue);
+ /* Mark state of the current frame to active */
+ vout->cur_frm->state = VIDEOBUF_ACTIVE;
+ /* Initialize field_id and started member */
+ vout->field_id = 0;
+
+ /* set flag here. Next QBUF will start DMA */
+ vout->streaming = 1;
+
+ vout->first_int = 1;
+
+ addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
+ + vout->cropped_offset[vout->cur_frm->i];
+#ifdef CONFIG_ARCH_OMAP4
+ uv_addr = (unsigned long) vout->queued_buf_uv_addr[vout->cur_frm->i]
+ + vout->cropped_uv_offset[vout->cur_frm->i];
+#endif
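+ /* addr/uv_addr point at the first queued frame: the physical base
+ * recorded at QBUF time plus the crop offset computed by
+ * omap_vout_calculate_offset(), so the overlay starts on the cropped
+ * region of the first buffer.
+ */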
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_FRAMEDONE |
+ DISPC_IRQ_FRAMEDONE2 | DISPC_IRQ_VSYNC2;
+
+ omap_dispc_register_isr(omap_vout_isr, vout, mask);
+
+ for (t = 0; t < ovid->num_overlays; t++) {
+ struct omap_overlay *ovl = ovid->overlays[t];
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 1;
+ info.paddr = addr;
+ info.p_uv_addr = uv_addr;
+ if (ovl->set_overlay_info(ovl, &info))
+ return -EINVAL;
+ }
+ }
+
+ /* First save the configuration in the overlay structure */
+ r = omapvid_init(vout, addr, uv_addr);
+ if (r)
+ printk(KERN_ERR VOUT_NAME "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ r = omapvid_apply_changes(vout);
+ if (r)
+ printk(KERN_ERR VOUT_NAME "failed to change mode\n");
+
+ mutex_unlock(&vout->lock);
+ return r;
+}
+
+static int vidioc_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type i)
+{
+ struct omap_vout_device *vout = fh;
+ int t, r = 0;
+ struct omapvideo_info *ovid = &vout->vid_info;
+ u32 mask = 0;
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ vout->streaming = 0;
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_FRAMEDONE |
+ DISPC_IRQ_FRAMEDONE2 | DISPC_IRQ_VSYNC2;
+
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+
+ for (t = 0; t < ovid->num_overlays; t++) {
+ struct omap_overlay *ovl = ovid->overlays[t];
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 0;
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r) {
+ printk(KERN_ERR VOUT_NAME "failed to \
+ update overlay info\n");
+ return r;
+ }
+ }
+ }
+
+ /* Turn off the pipeline */
+ r = omapvid_apply_changes(vout);
+ if (r) {
+ printk(KERN_ERR VOUT_NAME "failed to change mode\n");
+ return r;
+ }
+ INIT_LIST_HEAD(&vout->dma_queue);
+ videobuf_streamoff(&vout->vbq);
+ videobuf_queue_cancel(&vout->vbq);
+ return 0;
+}
+
+static int vidioc_s_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ int enable = 0;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* OMAP DSS doesn't support Source and Destination color
+ key together */
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_CHROMAKEY))
+ return -EINVAL;
+ /* OMAP DSS doesn't support the Destination color key
+ and alpha blending together */
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA))
+ return -EINVAL;
+
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_VID_SRC;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_CHROMAKEY;
+
+ if (a->flags & (V4L2_FBUF_FLAG_CHROMAKEY |
+ V4L2_FBUF_FLAG_SRC_CHROMAKEY))
+ enable = 1;
+ else
+ enable = 0;
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.trans_enabled = enable;
+ info.trans_key_type = key_type;
+ info.trans_key = vout->win.chromakey;
+
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+ if (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 1;
+ } else {
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 0;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.alpha_enabled = enable;
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ a->flags = 0x0;
+
+ a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
+ | V4L2_FBUF_CAP_SRC_CHROMAKEY;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_VID_SRC)
+ a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
+ a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.alpha_enabled)
+ a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vout_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_fbuf = vidioc_s_fbuf,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_crop = vidioc_g_crop,
+ .vidioc_s_crop = vidioc_s_crop,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+static const struct v4l2_file_operations omap_vout_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = video_ioctl2,
+ .mmap = omap_vout_mmap,
+ .open = omap_vout_open,
+ .release = omap_vout_release,
+};
+
+/* Init functions used during driver initialization */
+/* Initial setup of video_data */
+static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
+{
+ struct v4l2_pix_format *pix;
+ struct video_device *vfd;
+ struct v4l2_control *control;
+ struct omap_dss_device *display =
+ vout->vid_info.overlays[0]->manager->device;
+
+ /* set the default pix */
+ pix = &vout->pix;
+
+ /* Set the default picture size to QQVGA */
+ pix->width = QQVGA_WIDTH;
+ pix->height = QQVGA_HEIGHT;
+
+ /* Default pixel format is RGB 5-6-5 */
+ pix->pixelformat = V4L2_PIX_FMT_RGB565;
+ pix->field = V4L2_FIELD_ANY;
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->priv = 0;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
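+ /* With the assumed QQVGA defaults of 160x120 and RGB565, this gives
+ * bytesperline = 320 and sizeimage = 38400 bytes.
+ */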
+
+ vout->bpp = RGB565_BPP;
+ vout->fbuf.fmt.width = display->panel.timings.x_res;
+ vout->fbuf.fmt.height = display->panel.timings.y_res;
+
+ /* Set the data structures for the overlay parameters*/
+ vout->win.global_alpha = 255;
+ vout->fbuf.flags = 0;
+ vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA |
+ V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY;
+ vout->win.chromakey = 0;
+
+ omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);
+
+ /* Initialize the control variables for
+ rotation, flipping and background color. */
+ control = vout->control;
+ control[0].id = V4L2_CID_ROTATE;
+ control[0].value = 0;
+ vout->rotation = 0;
+ vout->mirror = 0;
+
+ control[1].id = V4L2_CID_BG_COLOR;
+ control[1].value = 0;
+
+ control[2].id = V4L2_CID_VFLIP;
+ control[2].value = 0;
+ vout->vrfb_bpp = 2;
+
+ /* initialize the video_device struct */
+ vfd = vout->vfd = video_device_alloc();
+
+ if (!vfd) {
+ printk(KERN_ERR VOUT_NAME ": could not allocate"
+ " video device struct\n");
+ return -ENOMEM;
+ }
+ vfd->release = video_device_release;
+ vfd->ioctl_ops = &vout_ioctl_ops;
+
+ strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));
+ vfd->vfl_type = VFL_TYPE_GRABBER;
+
+ /* need to register for a VID_HARDWARE_* ID in videodev.h */
+ vfd->fops = &omap_vout_fops;
+ mutex_init(&vout->lock);
+
+ vfd->minor = -1;
+ return 0;
+
+}
+
+/* Setup video buffers */
+static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
+ int vid_num)
+{
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev, struct
+ omap2video_device, v4l2_dev);
+ struct omap_vout_device *vout;
+ struct video_device *vfd;
+#ifndef TILER_ALLOCATE_V4L2 /* TODO: related to rotation */
+ int i, j, r = 0;
+ unsigned numbuffers;
+ int image_width, image_height;
+ int static_vrfb_allocation = 0, vrfb_num_bufs = 4;
+#endif
+ vout = vid_dev->vouts[vid_num];
+ vfd = vout->vfd;
+
+#ifndef TILER_ALLOCATE_V4L2
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_VIDEO3 == vid_num) {
+ numbuffers = video3_numbuffers;
+ vout->buffer_size = video3_bufsize;
+ } else
+#endif
+ {
+ numbuffers = (vid_num == 0) ? video1_numbuffers
+ : video2_numbuffers;
+ vout->buffer_size = (vid_num == 0) ? video1_bufsize
+ : video2_bufsize;
+ }
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Buffer Size = %d\n",
+ vout->buffer_size);
+
+ for (i = 0; i < numbuffers; i++) {
+ vout->buf_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->buffer_size,
+ (u32 *) &vout->buf_phy_addr[i]);
+ if (!vout->buf_virt_addr[i]) {
+ numbuffers = i;
+ r = -ENOMEM;
+ goto free_buffers;
+ }
+ vout->cropped_offset[i] = 0;
+#ifdef CONFIG_ARCH_OMAP4
+ vout->cropped_uv_offset[i] = 0;
+#endif
+ }
+#ifndef CONFIG_ARCH_OMAP4
+ for (i = 0; i < 4; i++) {
+ if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
+ printk(KERN_INFO VOUT_NAME ": VRFB Region allocation "
+ "for rotation failed\n");
+ r = -ENOMEM;
+ break;
+ }
+ }
+ if (r == -ENOMEM) {
+ for (j = 0; j < i; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+
+ goto free_buffers;
+ }
+
+
+ /* Calculate VRFB memory size */
+ /* allocate for worst case size */
+ image_width = VID_MAX_WIDTH / TILE_SIZE;
+ if (VID_MAX_WIDTH % TILE_SIZE)
+ image_width++;
+
+ image_width = image_width * TILE_SIZE;
+ image_height = VID_MAX_HEIGHT / TILE_SIZE;
+
+ if (VID_MAX_HEIGHT % TILE_SIZE)
+ image_height++;
+
+ image_height = image_height * TILE_SIZE;
+ vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
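+ /* Both dimensions are first rounded up to a multiple of TILE_SIZE so
+ * that any supported frame fits the VRFB tiling; the trailing 2 * 2
+ * factor presumably covers the maximum of 2 bytes per pixel with an
+ * extra factor of two of headroom for the rotated view.
+ */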
+
+ /*
+ * Request and Initialize DMA, for DMA based VRFB transfer
+ */
+ vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
+ vout->vrfb_dma_tx.dma_ch = -1;
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
+ r = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
+ omap_vout_vrfb_dma_tx_callback,
+ (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
+ if (r < 0) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ printk(KERN_INFO VOUT_NAME ": DMA Channel not alloted "
+ "for video%d [v4l2]\n", vfd->minor);
+ }
+ init_waitqueue_head(&vout->vrfb_dma_tx.wait);
+
+ /* Allocate VRFB buffers if selected through bootargs */
+ static_vrfb_allocation = (vid_num == 0) ?
+ vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
+
+ /* Static allocation of the VRFB buffers is done through
+ command line arguments */
+ if (static_vrfb_allocation) {
+ if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
+ r = -ENOMEM;
+ goto free_buffers;
+ }
+ vout->vrfb_static_allocation = 1;
+ }
+#endif /* CONFIG_ARCH_OMAP4 */
+
+ return 0;
+
+free_buffers:
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buf_phy_addr[i], vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ return r;
+#endif /* TILER_ALLOCATE_V4L2 */
+
+ /* NOTE: OMAP4, if TILER allocation, then nothing to pre-allocate */
+ return 0;
+}
+
+/* Create video out devices */
+static int __init omap_vout_create_video_devices(struct platform_device *pdev)
+{
+ int r = 0, k;
+ struct omap_vout_device *vout;
+ struct video_device *vfd = NULL;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+
+ struct omap2video_device *vid_dev = container_of(v4l2_dev, struct
+ omap2video_device, v4l2_dev);
+
+ for (k = 0; k < pdev->num_resources; k++) {
+
+ vout = kmalloc(sizeof(struct omap_vout_device), GFP_KERNEL);
+ if (!vout) {
+ printk(KERN_ERR VOUT_NAME
+ ": could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ memset(vout, 0, sizeof(struct omap_vout_device));
+
+ vout->vid = k;
+ vid_dev->vouts[k] = vout;
+ vout->vid_dev = vid_dev;
+#ifndef CONFIG_ARCH_OMAP4
+ /* Select video2 if only 1 overlay is controlled by V4L2 */
+ if (pdev->num_resources == 1)
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 2];
+ else
+ /* Else select video1 and video2 one by one. */
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 1];
+#else
+ vout->vid_info.overlays[0] =
+ vid_dev->overlays[
+ k + (OMAP4_MAX_OVERLAYS - pdev->num_resources)];
+#endif
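+ /* On OMAP4 the V4L2 devices take the last pdev->num_resources
+ * overlays; the (OMAP4_MAX_OVERLAYS - num_resources) offset skips the
+ * earlier pipes (typically the GFX overlay), which presumably stay
+ * with the framebuffer driver.
+ */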
+ vout->vid_info.num_overlays = 1;
+ vout->vid_info.id = k + 1;
+ vid_dev->num_videos++;
+
+ /* Setup the default configuration for the video devices
+ */
+ if (omap_vout_setup_video_data(vout) != 0) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ /* Allocate default number of buffers for the video streaming
+ * and reserve the VRFB space for rotation
+ */
+ if (omap_vout_setup_video_bufs(pdev, k) != 0) {
+ r = -ENOMEM;
+ goto error1;
+ }
+
+ /* Register the Video device with V4L2
+ */
+ vfd = vout->vfd;
+ if (video_register_device(vfd, VFL_TYPE_GRABBER, k + 1) < 0) {
+ printk(KERN_ERR VOUT_NAME ": could not register "
+ "Video for Linux device\n");
+ vfd->minor = -1;
+ r = -ENODEV;
+ goto error2;
+ }
+ video_set_drvdata(vfd, vout);
+
+ /* Configure the overlay structure */
+ r = omapvid_init(vid_dev->vouts[k], 0, 0);
+
+ if (r)
+ goto error2;
+ else
+ goto success;
+error2:
+#ifndef CONFIG_ARCH_OMAP4
+ omap_vout_release_vrfb(vout);
+#endif
+ omap_vout_free_buffers(vout);
+error1:
+ video_device_release(vfd);
+error:
+ kfree(vout);
+ return r;
+
+success:
+ printk(KERN_INFO VOUT_NAME ": registered and initialized "
+ "video device %d [v4l2]\n", vfd->minor);
+ if (k == (pdev->num_resources - 1))
+ return 0;
+ }
+ return -ENODEV;
+
+}
+/* Driver functions */
+static int omap_vout_remove(struct platform_device *pdev)
+{
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev, struct
+ omap2video_device, v4l2_dev);
+ int k;
+
+ v4l2_device_unregister(v4l2_dev);
+ for (k = 0; k < pdev->num_resources; k++)
+ omap_vout_cleanup_device(vid_dev->vouts[k]);
+
+ for (k = 0; k < vid_dev->num_displays; k++) {
+ if (vid_dev->displays[k]->state != OMAP_DSS_DISPLAY_DISABLED)
+ vid_dev->displays[k]->disable(vid_dev->displays[k]);
+
+ omap_dss_put_device(vid_dev->displays[k]);
+ }
+ kfree(vid_dev);
+ return 0;
+}
+
+static int __init omap_vout_probe(struct platform_device *pdev)
+{
+ int r = 0, i;
+ struct omap2video_device *vid_dev = NULL;
+ struct omap_overlay *ovl;
+ struct omap_dss_device *def_display;
+ struct omap_dss_device *dssdev;
+
+ if (pdev->num_resources == 0) {
+ dev_err(&pdev->dev, "probed for an unknown device\n");
+ r = -ENODEV;
+ return r;
+ }
+
+ vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
+ if (vid_dev == NULL) {
+ r = -ENOMEM;
+ return r;
+ }
+
+ vid_dev->num_displays = 0;
+ dssdev = NULL;
+ for_each_dss_dev(dssdev) {
+ omap_dss_get_device(dssdev);
+ vid_dev->displays[vid_dev->num_displays++] = dssdev;
+ }
+
+ if (vid_dev->num_displays == 0) {
+ dev_err(&pdev->dev, "no displays\n");
+ r = -EINVAL;
+ goto error0;
+ }
+
+ vid_dev->num_overlays = omap_dss_get_num_overlays();
+ for (i = 0; i < vid_dev->num_overlays; i++)
+ vid_dev->overlays[i] = omap_dss_get_overlay(i);
+
+ vid_dev->num_managers = omap_dss_get_num_overlay_managers();
+ for (i = 0; i < vid_dev->num_managers; i++)
+ vid_dev->managers[i] = omap_dss_get_overlay_manager(i);
+
+ /* Get the Video1, Video2 (and Video3 for OMAP4) overlays.
+ * Set up the display attached to those overlays
+ */
+ for (i = 1; i < (NUM_OF_VIDEO_CHANNELS + 1); i++) {
+ ovl = omap_dss_get_overlay(i);
+ if (ovl->manager && ovl->manager->device) {
+ def_display = ovl->manager->device;
+ } else {
+ dev_warn(&pdev->dev, "cannot find display\n");
+ def_display = NULL;
+ }
+ if (def_display) {
+ r = def_display->enable(def_display);
+ if (r) {
+ /* Here we are not considering it an error,
+ * as the display may already be enabled by the
+ * frame buffer driver
+ */
+ dev_warn(&pdev->dev,
+ "'%s' Display already enabled\n",
+ def_display->name);
+ }
+ /* set the update mode */
+ if (def_display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+#ifdef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
+ if (def_display->enable_te)
+ def_display->enable_te(def_display, 1);
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_AUTO);
+#else /* MANUAL_UPDATE */
+ if (def_display->enable_te)
+ def_display->enable_te(def_display, 0);
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_MANUAL);
+#endif
+ } else {
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_AUTO);
+ }
+ }
+ }
+
+ if (v4l2_device_register(&pdev->dev, &vid_dev->v4l2_dev) < 0) {
+ printk(KERN_ERR VOUT_NAME "v4l2_device_register failed\n");
+ return -ENODEV;
+ }
+
+ r = omap_vout_create_video_devices(pdev);
+ if (r)
+ goto error0;
+
+ for (i = 0; i < vid_dev->num_displays; i++) {
+ struct omap_dss_device *display = vid_dev->displays[i];
+
+ if (display->update)
+ display->update(display, 0, 0,
+ display->panel.timings.x_res,
+ display->panel.timings.y_res);
+ }
+ return 0;
+
+error0:
+ kfree(vid_dev);
+ return r;
+}
+
+static struct platform_driver omap_vout_driver = {
+ .driver = {
+ .name = VOUT_NAME,
+ },
+ .probe = omap_vout_probe,
+ .remove = omap_vout_remove,
+};
+
+void omap_vout_isr(void *arg, unsigned int irqstatus)
+{
+ int r;
+ struct timeval timevalue = {0};
+ struct omap_vout_device *vout =
+ (struct omap_vout_device *) arg;
+ u32 addr, uv_addr = 0;
+ unsigned long flags;
+
+#if !(CONFIG_OMAP2_DSS_HDMI)
+ u32 fid;
+#endif
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ struct omap_dss_device *cur_display;
+ int irq = 0;
+ if (!vout->streaming)
+ return;
+
+ ovid = &(vout->vid_info);
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device)
+ return;
+ cur_display = ovl->manager->device;
+
+ if (cur_display->channel == OMAP_DSS_CHANNEL_LCD)
+ irq = DISPC_IRQ_FRAMEDONE;
+
+#ifdef CONFIG_ARCH_OMAP4
+ else if (cur_display->channel == OMAP_DSS_CHANNEL_LCD2)
+ irq = DISPC_IRQ_FRAMEDONE2;
+#endif
+ spin_lock_irqsave(&vout->vbq_lock, flags);
+ do_gettimeofday(&timevalue);
+
+ switch (cur_display->type) {
+
+ case OMAP_DISPLAY_TYPE_DSI:
+ if (!(irqstatus & irq)) {
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ return;
+ }
+ break;
+
+ case OMAP_DISPLAY_TYPE_DPI:
+ if (!(irqstatus & (DISPC_IRQ_VSYNC | DISPC_IRQ_VSYNC2))) {
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ return;
+ }
+#ifdef CONFIG_PANEL_PICO_DLP
+ if (dispc_go_busy(OMAP_DSS_CHANNEL_LCD2)) {
+ spin_unlock(&vout->vbq_lock);
+ printk("dpi busy %d !! \n", cur_display->type);
+ return;
+ }
+#endif
+ break;
+
+#if CONFIG_OMAP2_DSS_HDMI
+
+ case OMAP_DISPLAY_TYPE_HDMI:
+ if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN)) {
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ return;
+ }
+ break;
+#else
+ case OMAP_DISPLAY_TYPE_VENC:
+ if (vout->first_int) {
+ vout->first_int = 0;
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ return;
+ }
+ if (irqstatus & DISPC_IRQ_EVSYNC_ODD) {
+ fid = 1;
+ } else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN) {
+ fid = 0;
+ } else {
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ return;
+ }
+ fid = 1;
+ vout->field_id ^= 1;
+ if (fid != vout->field_id) {
+ if (0 == fid)
+ vout->field_id = fid;
+
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ return;
+ }
+ if (0 == fid) {
+ if (vout->cur_frm == vout->next_frm) {
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ return;
+ }
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ goto end;
+ } else if (1 == fid) {
+ if (list_empty(&vout->dma_queue) ||
+ (vout->cur_frm != vout->next_frm)) {
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ return;
+ }
+ goto venc;
+ }
+#endif
+
+ default:
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ return;
+ }
+
+ if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ }
+
+ vout->first_int = 0;
+ if (list_empty(&vout->dma_queue)) {
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ return;
+ }
+
+#if !(CONFIG_OMAP2_DSS_HDMI)
+venc:
+#endif
+ vout->next_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vout->next_frm->queue);
+
+ vout->next_frm->state = VIDEOBUF_ACTIVE;
+ addr = (unsigned long)
+ vout->queued_buf_addr[vout->next_frm->i]
+ + vout->cropped_offset[vout->next_frm->i];
+#ifdef CONFIG_ARCH_OMAP4
+ uv_addr = (unsigned long)vout->queued_buf_uv_addr[
+ vout->next_frm->i]
+ + vout->cropped_uv_offset[vout->next_frm->i];
+#endif
+
+ /* First save the configuration in the overlay structure */
+ r = omapvid_init(vout, addr, uv_addr);
+ if (r)
+ printk(KERN_ERR VOUT_NAME
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ r = omapvid_apply_changes(vout);
+ if (r)
+ printk(KERN_ERR VOUT_NAME
+ "failed to change mode\n");
+
+#ifdef CONFIG_PANEL_PICO_DLP
+ if (sysfs_streq(cur_display->name, "pico_DLP"))
+ dispc_go(OMAP_DSS_CHANNEL_LCD2);
+#endif
+
+#if !(CONFIG_OMAP2_DSS_HDMI)
+end:
+#endif
+
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+}
+
+static void omap_vout_cleanup_device(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+
+ if (!vout)
+ return;
+ vfd = vout->vfd;
+
+ if (vfd) {
+ if (vfd->minor == -1) {
+ /*
+ * The device was never registered, so release the
+ * video_device struct directly.
+ */
+ video_device_release(vfd);
+ } else {
+ /*
+ * The unregister function will release the video_device
+ * struct as well as unregistering it.
+ */
+ video_unregister_device(vfd);
+ }
+ }
+
+#ifndef CONFIG_ARCH_OMAP4
+ omap_vout_release_vrfb(vout);
+#endif
+ omap_vout_free_buffers(vout);
+#ifdef CONFIG_ARCH_OMAP4
+#ifdef TILER_ALLOCATE_V4L2
+ omap_vout_free_tiler_buffers(vout);
+ /* TODO: check if this needs to be done? */
+#endif
+#else
+ /* Free the VRFB buffer if allocated
+ * init time
+ */
+ if (vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+#endif
+ kfree(vout);
+}
+
+static int __init omap_vout_init(void)
+{
+
+ if (platform_driver_register(&omap_vout_driver) != 0) {
+ printk(KERN_ERR VOUT_NAME ": could not register \
+ Video driver\n");
+ return -EINVAL;
+ }
+ mutex_init(&my_lock);
+ return 0;
+}
+
+static void omap_vout_cleanup(void)
+{
+ platform_driver_unregister(&omap_vout_driver);
+}
+
+late_initcall(omap_vout_init);
+module_exit(omap_vout_cleanup);
diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h
new file mode 100644
index 000000000000..455852f483b5
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutdef.h
@@ -0,0 +1,164 @@
+/*
+ * drivers/media/video/omap/omap_voutdef.h
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef OMAP_VOUTDEF_H
+#define OMAP_VOUTDEF_H
+
+#include <plat/display.h>
+
+#define YUYV_BPP 2
+#define RGB565_BPP 2
+#define RGB24_BPP 3
+#define RGB32_BPP 4
+#define TILE_SIZE 32
+#define YUYV_VRFB_BPP 2
+#define RGB_VRFB_BPP 1
+#define MAX_CID 3
+#define MAC_VRFB_CTXS 4
+
+#ifdef CONFIG_ARCH_OMAP4 /* TODO: OMAP4 update displays and managers */
+#define MAX_VOUT_DEV 3
+#define MAX_OVLS 4
+#else
+#define MAX_VOUT_DEV 2
+#define MAX_OVLS 3
+#endif
+#define MAX_DISPLAYS 4
+#define MAX_MANAGERS 3
+
+/* Enum for Rotation
+ * DSS represents rotation as 0, 1, 2, 3,
+ * while the V4L2 driver represents it as 0, 90, 180, 270 degrees
+ */
+enum dss_rotation {
+ dss_rotation_0_degree = 0,
+ dss_rotation_90_degree = 1,
+ dss_rotation_180_degree = 2,
+ dss_rotation_270_degree = 3,
+};
+/*
+ * This structure is used to store the DMA transfer parameters
+ * for VRFB hidden buffer
+ */
+struct vid_vrfb_dma {
+ int dev_id;
+ int dma_ch;
+ int req_status;
+ int tx_status;
+ wait_queue_head_t wait;
+};
+
+struct omapvideo_info {
+ int id;
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+};
+
+struct omap2video_device {
+ struct mutex mtx;
+
+ int state;
+
+ struct v4l2_device v4l2_dev;
+ int num_videos;
+ struct omap_vout_device *vouts[MAX_VOUT_DEV];
+
+ int num_displays;
+ struct omap_dss_device *displays[MAX_DISPLAYS];
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+ int num_managers;
+ struct omap_overlay_manager *managers[MAX_MANAGERS];
+};
+
+/* per-device data structure */
+struct omap_vout_device {
+
+ struct omapvideo_info vid_info;
+ struct video_device *vfd;
+ struct omap2video_device *vid_dev;
+ int vid;
+ int opened;
+
+ /* we don't allow changing the image fmt/size once buffers have
+ * been allocated
+ */
+ int buffer_allocated;
+ /* allow to reuse previously allocated buffer which is big enough */
+ int buffer_size;
+ /* keep buffer info across opens */
+ unsigned long buf_virt_addr[VIDEO_MAX_FRAME];
+ unsigned long buf_phy_addr[VIDEO_MAX_FRAME];
+ /* keep which buffers we actually allocated (via tiler) */
+ unsigned long buf_phy_uv_addr_alloced[VIDEO_MAX_FRAME];
+ unsigned long buf_phy_addr_alloced[VIDEO_MAX_FRAME];
+
+ /* NV12 support */
+ unsigned long buf_phy_uv_addr[VIDEO_MAX_FRAME];
+ u8 *queued_buf_uv_addr[VIDEO_MAX_FRAME];
+
+ enum omap_color_mode dss_mode;
+
+ /* we don't allow requesting new buffers while old buffers are
+ * still mmapped
+ */
+ int mmap_count;
+
+ spinlock_t vbq_lock; /* spinlock for videobuf queues */
+ unsigned long field_count; /* field counter for videobuf_buffer */
+
+ /* true means streaming is in progress. */
+ bool streaming;
+
+ struct v4l2_pix_format pix;
+ struct v4l2_rect crop;
+ struct v4l2_window win;
+ struct v4l2_framebuffer fbuf;
+
+ /* Lock to protect the shared data structures in ioctl */
+ struct mutex lock;
+
+ /* V4L2 control structure for different control id */
+ struct v4l2_control control[MAX_CID];
+ enum dss_rotation rotation;
+ bool mirror;
+ int flicker_filter;
+
+ int bpp; /* bytes per pixel */
+ int vrfb_bpp; /* bytes per pixel with respect to VRFB */
+ struct vid_vrfb_dma vrfb_dma_tx;
+ unsigned int smsshado_phy_addr[MAC_VRFB_CTXS];
+ unsigned int smsshado_virt_addr[MAC_VRFB_CTXS];
+ struct vrfb vrfb_context[MAC_VRFB_CTXS];
+ bool vrfb_static_allocation;
+ unsigned int smsshado_size;
+ unsigned char pos;
+
+ int ps, vr_ps, line_length, first_int, field_id;
+ enum v4l2_memory memory;
+ struct videobuf_buffer *cur_frm, *next_frm;
+ struct list_head dma_queue;
+ u8 *queued_buf_addr[VIDEO_MAX_FRAME];
+ u32 cropped_offset[VIDEO_MAX_FRAME];
+#ifdef CONFIG_ARCH_OMAP4
+ u32 cropped_uv_offset[VIDEO_MAX_FRAME];
+#endif
+ s32 tv_field1_offset;
+ void *isr_handle;
+
+ /* Buffer queue variables */
+ struct omap_vout_device *vout;
+ enum v4l2_buf_type type;
+ struct videobuf_queue vbq;
+ int io_allowed;
+
+};
+#endif /* ifndef OMAP_VOUTDEF_H */
diff --git a/drivers/media/video/omap/omap_voutlib.c b/drivers/media/video/omap/omap_voutlib.c
new file mode 100644
index 000000000000..a6ce900eafe7
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutlib.c
@@ -0,0 +1,259 @@
+/*
+ * drivers/media/video/omap/omap_voutlib.c
+ *
+ * Copyright (C) 2005-2009 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Based on the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <plat/cpu.h>
+
+MODULE_AUTHOR("Texas Instruments.");
+MODULE_DESCRIPTION("OMAP Video library");
+MODULE_LICENSE("GPL");
+
+/* Return the default overlay cropping rectangle in crop given the image
+ * size in pix and the video display size in fbuf. The default
+ * cropping rectangle is the largest rectangle no larger than the capture size
+ * that will fit on the display. The default cropping rectangle is centered in
+ * the image. All dimensions and offsets are rounded down to even numbers.
+ */
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop)
+{
+ crop->width = (pix->width < fbuf->fmt.width) ?
+ pix->width : fbuf->fmt.width;
+ crop->height = (pix->height < fbuf->fmt.height) ?
+ pix->height : fbuf->fmt.height;
+ crop->width &= ~1;
+ crop->height &= ~1;
+ crop->left = ((pix->width - crop->width) >> 1) & ~1;
+ crop->top = ((pix->height - crop->height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_default_crop);
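+
+/*
+ * Worked example (illustration only, not driver code): a 1920x1080 source
+ * shown on an 800x480 display gives crop = 800x480 -- the minimum of each
+ * dimension -- centered at left = 560, top = 300, with every value rounded
+ * down to an even number as above.
+ */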
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The adjusted window parameters are
+ * returned in new_win.
+ * Returns zero if successful, or -EINVAL if the requested window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ struct v4l2_rect try_win;
+
+ /* make a working copy of the new_win rectangle */
+ try_win = new_win->w;
+
+ /* adjust the preview window so it fits on the display by clipping any
+ * offscreen areas
+ */
+ if (try_win.left < 0) {
+ try_win.width += try_win.left;
+ try_win.left = 0;
+ }
+ if (try_win.top < 0) {
+ try_win.height += try_win.top;
+ try_win.top = 0;
+ }
+ try_win.width = (try_win.width < fbuf->fmt.width) ?
+ try_win.width : fbuf->fmt.width;
+ try_win.height = (try_win.height < fbuf->fmt.height) ?
+ try_win.height : fbuf->fmt.height;
+ if (try_win.left + try_win.width > fbuf->fmt.width)
+ try_win.width = fbuf->fmt.width - try_win.left;
+ if (try_win.top + try_win.height > fbuf->fmt.height)
+ try_win.height = fbuf->fmt.height - try_win.top;
+ try_win.width &= ~1;
+ try_win.height &= ~1;
+
+ if (try_win.width <= 0 || try_win.height <= 0)
+ return -EINVAL;
+
+ /* We now have a valid preview window, so go with it */
+ new_win->w = try_win;
+ new_win->field = V4L2_FIELD_ANY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_try_window);
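+
+/*
+ * Worked example (illustration only): a window requested at left = -100,
+ * width = 500 on a 480-pixel-wide display is clipped above to left = 0,
+ * width = 400 (the 100 offscreen pixels are removed and the result still
+ * fits within fbuf->fmt.width); 400 is already even, so it is kept.
+ */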
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The image cropping window in crop
+ * will also be adjusted if necessary. Preference is given to keeping the
+ * window as close to the requested configuration as possible. If
+ * successful, new_win, vout->win, and crop are updated.
+ * Returns zero if successful, or -EINVAL if the requested preview window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ int err;
+
+ err = omap_vout_try_window(fbuf, new_win);
+ if (err)
+ return err;
+
+ /* update our preview window */
+ win->w = new_win->w;
+ win->field = new_win->field;
+ win->chromakey = new_win->chromakey;
+
+ /* adjust the cropping window to allow for resizing limitations */
+ if ((crop->height/win->w.height) >= 4) {
+ /* The maximum vertical downsizing ratio is 4:1 */
+ crop->height = win->w.height * 4;
+ }
+ if ((crop->width/win->w.width) >= 4) {
+ /* The maximum horizontal downsizing ratio is 4:1 */
+ crop->width = win->w.width * 4;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_window);
+
+/* Given a new cropping rectangle in new_crop, adjust the cropping rectangle to
+ * the nearest supported configuration. The image render window in win will
+ * also be adjusted if necessary. The preview window is adjusted such that the
+ * horizontal and vertical rescaling ratios stay constant. If the render
+ * window would fall outside the display boundaries, the cropping rectangle
+ * will also be adjusted to maintain the rescaling ratios. If successful, crop
+ * and win are updated.
+ * Returns zero if successful, or -EINVAL if the requested cropping rectangle is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf, const struct v4l2_rect *new_crop)
+{
+ struct v4l2_rect try_crop;
+ unsigned long vresize, hresize;
+
+ /* make a working copy of the new_crop rectangle */
+ try_crop = *new_crop;
+
+ /* adjust the cropping rectangle so it fits in the image */
+ if (try_crop.left < 0) {
+ try_crop.width += try_crop.left;
+ try_crop.left = 0;
+ }
+ if (try_crop.top < 0) {
+ try_crop.height += try_crop.top;
+ try_crop.top = 0;
+ }
+ try_crop.width = (try_crop.width < pix->width) ?
+ try_crop.width : pix->width;
+ try_crop.height = (try_crop.height < pix->height) ?
+ try_crop.height : pix->height;
+ if (try_crop.left + try_crop.width > pix->width)
+ try_crop.width = pix->width - try_crop.left;
+ if (try_crop.top + try_crop.height > pix->height)
+ try_crop.height = pix->height - try_crop.top;
+ try_crop.width &= ~1;
+ try_crop.height &= ~1;
+ if (try_crop.width <= 0 || try_crop.height <= 0)
+ return -EINVAL;
+
+ if (cpu_is_omap24xx() && crop->height != win->w.height) {
+ /* If we're resizing vertically, we can't support a crop width
+ * wider than 768 pixels on OMAP2.
+ */
+ if (try_crop.width > 768)
+ try_crop.width = 768;
+ }
+ /* vertical resizing */
+ vresize = (1024 * crop->height) / win->w.height;
+ if (vresize > 4096)
+ vresize = 4096;
+ else if (vresize == 0)
+ vresize = 1;
+ win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
+ if (win->w.height == 0)
+ win->w.height = 2;
+ if (win->w.height + win->w.top > fbuf->fmt.height) {
+ /* We made the preview window extend below the bottom of the
+ * display, so clip it to the display boundary and resize the
+ * cropping height to maintain the vertical resizing ratio.
+ */
+ win->w.height = (fbuf->fmt.height - win->w.top) & ~1;
+ if (try_crop.height == 0)
+ try_crop.height = 2;
+ }
+ /* horizontal resizing */
+ hresize = (1024 * crop->width) / win->w.width;
+ if (hresize > 4096)
+ hresize = 4096;
+ else if (hresize == 0)
+ hresize = 1;
+ win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
+ if (win->w.width == 0)
+ win->w.width = 2;
+ if (win->w.width + win->w.left > fbuf->fmt.width) {
+ /* We made the preview window extend past the right side of the
+ * display, so clip it to the display boundary and resize the
+ * cropping width to maintain the horizontal resizing ratio.
+ */
+ win->w.width = (fbuf->fmt.width - win->w.left) & ~1;
+ if (try_crop.width == 0)
+ try_crop.width = 2;
+ }
+
+ /* Check for resizing constraints */
+ if ((try_crop.height/win->w.height) >= 4) {
+ /* The maximum vertical downsizing ratio is 4:1 */
+ try_crop.height = win->w.height * 4;
+ }
+ if ((try_crop.width/win->w.width) >= 4) {
+ /* The maximum horizontal downsizing ratio is 4:1 */
+ try_crop.width = win->w.width * 4;
+ }
+
+ /* update our cropping rectangle and we're done */
+ *crop = try_crop;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_crop);
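+
+/*
+ * Worked example of the 1024-scaled ratios above (illustration only):
+ * cropping 640 source lines onto a 320-line window gives
+ * vresize = (1024 * 640) / 320 = 2048, i.e. a 2:1 vertical downscale;
+ * the cap of 4096 corresponds to the maximum 4:1 downsizing ratio that is
+ * also enforced at the end of the function.
+ */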
+
+/* Given a new format in pix and fbuf, crop and win
+ * structures are initialized to default values. crop
+ * is initialized to the largest window size that will fit on the display. The
+ * crop window is centered in the image. win is initialized to
+ * the same size as crop and is centered on the display.
+ * All sizes and offsets are constrained to be even numbers.
+ */
+void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win)
+{
+ /* crop defines the preview source window in the image capture
+ * buffer
+ */
+ omap_vout_default_crop(pix, fbuf, crop);
+
+ /* win defines the preview target window on the display */
+ win->w.width = crop->width;
+ win->w.height = crop->height;
+ win->w.left = ((fbuf->fmt.width - win->w.width) >> 1) & ~1;
+ win->w.top = ((fbuf->fmt.height - win->w.height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_format);
+
diff --git a/drivers/media/video/omap/omap_voutlib.h b/drivers/media/video/omap/omap_voutlib.h
new file mode 100644
index 000000000000..8ef6e25b9e62
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutlib.h
@@ -0,0 +1,34 @@
+/*
+ * drivers/media/video/omap/omap_voutlib.h
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUTLIB_H
+#define OMAP_VOUTLIB_H
+
+extern void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop);
+
+extern int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf,
+ const struct v4l2_rect *new_crop);
+
+extern int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+extern int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+extern void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win);
+#endif /* #ifndef OMAP_VOUTLIB_H */
+
diff --git a/drivers/media/video/tiler/Kconfig b/drivers/media/video/tiler/Kconfig
new file mode 100644
index 000000000000..fabbb59a6c8d
--- /dev/null
+++ b/drivers/media/video/tiler/Kconfig
@@ -0,0 +1,6 @@
+config TILER_OMAP
+ tristate "OMAP TILER support"
+ default y
+ help
+ TILER driver for OMAP based boards.
+
diff --git a/drivers/media/video/tiler/Makefile b/drivers/media/video/tiler/Makefile
new file mode 100644
index 000000000000..e6dbe24ce9d3
--- /dev/null
+++ b/drivers/media/video/tiler/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_TILER_OMAP) += tcm/
+obj-$(CONFIG_TILER_OMAP) += tiler_omap.o
+tiler_omap-objs = tiler.o tiler_pack.o tiler_rot.o
+
diff --git a/drivers/media/video/tiler/tcm/Makefile b/drivers/media/video/tiler/tcm/Makefile
new file mode 100644
index 000000000000..bde1d9c31a83
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_TILER_OMAP) += tcm_omap.o
+tcm_omap-objs = tcm.o tcm_utils.o
+
diff --git a/drivers/media/video/tiler/tcm/tcm.c b/drivers/media/video/tiler/tcm/tcm.c
new file mode 100644
index 000000000000..9f4f7ced9588
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/tcm.c
@@ -0,0 +1,1770 @@
+/*
+* tcm.c
+*
+* Author: Ravi Ramachandra <r.ramachandra@ti.com>
+*
+* Tiler 2D and 1D Container Management Algorithm.
+*
+* Copyright (C) 2009-2010 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+*
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include "tcm_pri.h"
+#include "tcm.h"
+#include "tcm_utils.h"
+
+/*********************************************
+ * POSITIONING & OPTIMIZATION TWEAKS
+*********************************************/
+
+/* Restricts scanning of unnecessary tiles */
+
+/* By enabling X_SCAN_LIMITER, we scan each subsequent line only up to the
+previously found x0, thus eliminating candidates during the scan itself */
+#define X_SCAN_LIMITER
+/* By enabling Y_SCAN_LIMITER, we stop the scan from going down (or up) once a
+candidate's y0 matches the scan area's starting row */
+#define Y_SCAN_LIMITER
+
+/* Enabling this will HARD restrict 1D to as specified in g_div_ln...'s y1 */
+/* #define RESTRICT_1D */
+
+/*********************************************/
+
+
+/* ********************************************
+ * GLOBALS
+ *********************************************/
+/* global container that keeps a map of which tiles are occupied
+and which are free */
+static struct tiler_page g_area_container[MAX_X_DIMMENSION][MAX_Y_DIMMENSION];
+
+/* This list keeps track of all allocations in terms of the areas
+allocated; it is consulted when removing allocations */
+struct area_spec_list *g_allocation_list;
+
+/*Vertical line divider between 64 and 32 aligned scan areas */
+struct area_spec g_div_ln_btw_64_and_32_align = {192, 0, 192, 96};
+
+/* Just some temp ID, which will roll over 32K */
+u32 g_id;
+
+static struct mutex g_mutex;
+
+
+/* Individual selection criteria for different scan areas */
+static s32 g_scan_criteria_l2r_t2b = CR_BIAS_HORIZONTAL;
+static s32 g_scan_criteria_l2r_b2t = CR_DIAGONAL_BALANCE;
+static s32 g_scan_criteria_r2l_t2b = CR_DIAGONAL_BALANCE;
+static s32 g_scan_criteria_r2l_b2t = CR_FIRST_FOUND;
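+
+/*
+ * For reference (see select_candidate() below): CR_FIRST_FOUND and
+ * CR_BIAS_HORIZONTAL simply take the first candidate found by the raster
+ * scan, CR_BIAS_VERTICAL prefers candidates further from the scan start in
+ * Y, CR_MAX_NEIGHS prefers candidates with more occupied/boundary
+ * neighbours, and CR_DIAGONAL_BALANCE compares neighbour counts first and
+ * then the nearness factors.
+ */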
+
+/*********************************************/
+
+
+/*********************************************
+ * LOCAL METHODS
+ *********************************************/
+static s32 scan_areas_and_find_fit(u16 w, u16 h, u16 stride,
+ struct area_spec *allocated_area);
+static s32 scan_l2r_t2b(u16 w, u16 h, u16 stride, struct area_spec *scan_area,
+ struct area_spec *alloc_area);
+static s32 scan_r2l_t2b(u16 w, u16 h, u16 stride, struct area_spec *scan_area,
+ struct area_spec *alloc_area);
+static s32 scan_l2r_b2t(u16 w, u16 h, u16 stride, struct area_spec *scan_area,
+ struct area_spec *alloc_area);
+static s32 scan_r2l_b2t(u16 w, u16 h, u16 stride, struct area_spec *scan_area,
+ struct area_spec *alloc_area);
+static s32 scan_r2l_b2t_one_dim(u32 num_of_pages, struct area_spec *scan_area,
+ struct area_spec *alloc_area);
+
+/* Support Infrastructure functions */
+static s32 check_fit_r_and_b(u16 w, u16 h, u16 left_x, u16 top_y);
+static s32 check_fit_r_one_dim(u16 x, u16 y, u32 num_of_pages, u16 *busy_x,
+ u16 *busy_y);
+static s32 select_candidate(IN u16 w, IN u16 h, IN u16 num_short_listed,
+ IN struct area_spec_list *short_listed, IN struct area_spec *scan_area,
+ IN s32 criteria, OUT struct area_spec *alloc_area);
+/* the old select_candidate will be deprecated after testing the new one */
+static s32 get_nearness_factor(struct area_spec *scan_area,
+ struct area_spec *candidate, struct nearness_factor *nf);
+static s32 get_busy_neigh_stats(u16 width, u16 height,
+ struct area_spec *top_left_corner,
+ struct neighbour_stats *neighbour_stat);
+static s32 insert_area_with_tiler_page(struct area_spec *area,
+ struct tiler_page tile);
+static s32 insert_pages_with_tiler_page(struct area_spec *area,
+ struct tiler_page tile);
+
+/*********************************************/
+
+
+/**
+ * @description: Initializes tiler container.
+ *
+ * @input: None
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+s32 init_tiler(void)
+{
+ struct tiler_page init_tile;
+ struct area_spec area = {0};
+
+ init_tile.is_occupied = 0;
+ init_tile.parent_area.x0 = 0;
+ init_tile.parent_area.x1 = 0;
+ init_tile.parent_area.y0 = 0;
+ init_tile.parent_area.y1 = 0;
+ init_tile.reserved = 0;
+ init_tile.type = 0;
+
+ area.x1 = MAX_X_DIMMENSION - 1;
+ area.y1 = MAX_Y_DIMMENSION - 1;
+
+ mutex_init(&g_mutex);
+ MUTEX_LOCK(&g_mutex);
+ insert_area_with_tiler_page(&area, init_tile);
+ MUTEX_REL(&g_mutex);
+ return TilerErrorNone;
+}
+
+
+/**
+ * @description: De-initializes the tiler container and
+ * removes existing allocations
+ *
+ * @input: None
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+s32 deinit_tiler(void)
+{
+ struct tiler_page init_tile;
+ struct area_spec area = {0};
+ /* Cleaning all the entries in the list and marking tiler container
+ as free */
+
+ init_tile.is_occupied = 0;
+ init_tile.parent_area.x0 = 0;
+ init_tile.parent_area.x1 = 0;
+ init_tile.parent_area.y0 = 0;
+ init_tile.parent_area.y1 = 0;
+ init_tile.reserved = 0;
+ init_tile.type = 0;
+
+ area.x1 = MAX_X_DIMMENSION - 1;
+ area.y1 = MAX_Y_DIMMENSION - 1;
+
+ MUTEX_LOCK(&g_mutex);
+ insert_area_with_tiler_page(&area, init_tile);
+ clean_list(&g_allocation_list);
+ MUTEX_REL(&g_mutex);
+ mutex_destroy(&g_mutex);
+ return TilerErrorNone;
+}
+
+/**
+ * @description: Allocate 1d pages if the required number of pages are
+ * available in the container
+ *
+ * @input:num_of_pages to be allocated
+ *
+ * @return 0 on success, non-0 error value on failure. On success
+ * allocated_pages contain co-ordinates of start and end Tiles(inclusive)
+ */
+s32 allocate_1d_pages(u32 num_of_pages, struct area_spec *allocated_pages)
+{
+ s32 ret = TilerErrorNone;
+ struct area_spec scan_area = {0, 0, 0, 0};
+ struct tiler_page tile;
+
+ memset(&tile, 0, sizeof(struct tiler_page));
+ P1("Allocate %d pages\n", num_of_pages);
+
+ /* Basic checks */
+ if (allocated_pages == NULL) {
+ PE("NULL input found\n");
+ return TilerErrorInvalidArg;
+ }
+ /* Deliberately checking here to give relevant error info */
+ if (num_of_pages > (MAX_X_DIMMENSION * MAX_Y_DIMMENSION)) {
+ PE("num_of_pages exceed maximum pages available(%d)\n",
+ (MAX_X_DIMMENSION * MAX_Y_DIMMENSION));
+ return TilerErrorNoRoom;
+ }
+ MUTEX_LOCK(&g_mutex);
+#ifdef RESTRICT_1D
+ /* scan within predefined 1D boundary */
+ assign(&scan_area, (MAX_X_DIMMENSION-1), (MAX_Y_DIMMENSION - 1), 0,
+ g_div_ln_btw_64_and_32_align.y1);
+#else
+ /* Scanning entire container */
+ assign(&scan_area, MAX_X_DIMMENSION - 1, MAX_Y_DIMMENSION - 1, 0, 0);
+#endif
+ ret = scan_r2l_b2t_one_dim(num_of_pages, &scan_area, allocated_pages);
+
+ /* There is not much to select; we pretty much take the first one that
+ accommodates the request */
+ if (ret != TilerErrorNone) {
+ PE("Failed to Allocate 1D Pages\n");
+ } else {
+ P("Yahoo found a fit: %s\n", AREA_STR(a_str, allocated_pages));
+ tile.is_occupied = OCCUPIED;
+ assign(&tile.parent_area, allocated_pages->x0,
+ allocated_pages->y0, allocated_pages->x1,
+ allocated_pages->y1);
+ /* some id, not useful now */
+ tile.reserved = g_id++;
+ /* Saying that type is 1d */
+ tile.type = ONE_D;
+ /* inserting into tiler container */
+ insert_pages_with_tiler_page(allocated_pages, tile);
+ /*updating the list of allocations */
+ insert_element(&g_allocation_list, allocated_pages, ONE_D);
+ }
+ MUTEX_REL(&g_mutex);
+ return ret;
+}
+
+
+/**
+ * @description: Allocate 2d area on availability in the container
+ *
+ * @input:'w'idth and 'h'eight of the 2d area, 'align'ment specification
+ *
+ * @return 0 on success, non-0 error value on failure. On success
+ * allocated_area contain co-ordinates of TL corner Tile and BR corner Tile of
+ * the rectangle (inclusive)
+ */
+s32 allocate_2d_area(u16 w, u16 h, enum alignment align,
+ struct area_spec *allocated_area)
+{
+ s32 ret = TilerErrorNone;
+ u16 stride = ALIGN_STRIDE(align);
+ struct tiler_page tile;
+
+ P1("\n\nStart of allocation 2D Area for WxH : (%d x %d) with Alignment\
+ %d \n", w, h, stride);
+
+ /* Checking for input arguments */
+ if (allocated_area == NULL) {
+ PE("NULL input found\n");
+ return TilerErrorInvalidArg;
+ }
+ /* ALIGN_16 is currently NOT supported*/
+ if (align == ALIGN_16) {
+ PE("Align 16 NOT supported \n");
+ return TilerErrorNotSupported;
+ }
+ /*check if width and height are within limits */
+ if (w > MAX_X_DIMMENSION || w == 0 || h > MAX_Y_DIMMENSION || h == 0) {
+ PE("Invalid dimension:: %d x %d\n", w, h);
+ return TilerErrorInvalidDimension;
+ }
+ MUTEX_LOCK(&g_mutex);
+ ret = scan_areas_and_find_fit(w, h, stride, allocated_area);
+ if (ret != TilerErrorNone) {
+ PE("Did not find anything in the given area\n");
+ } else {
+ P("Yahoo found a fit: %s\n", AREA_STR(a_str, allocated_area));
+ tile.is_occupied = OCCUPIED;
+ assign(&tile.parent_area, allocated_area->x0,
+ allocated_area->y0, allocated_area->x1,
+ allocated_area->y1);
+
+ /* some id, not useful now */
+ tile.reserved = g_id++;
+ /* Saying that type is 2D */
+ tile.type = TWO_D;
+ /* inserting into tiler container */
+ ret = insert_area_with_tiler_page(allocated_area, tile);
+ if (ret == TilerErrorNone) {
+ /*updating the list of allocations */
+ insert_element(&g_allocation_list,
+ allocated_area, TWO_D);
+ } else {
+ PE("Could not insert area\n %s\n", AREA_STR(a_str,
+ allocated_area));
+ }
+ }
+ MUTEX_REL(&g_mutex);
+ return ret;
+}
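+
+/*
+ * Usage sketch (illustration only, not part of this file): a caller would
+ * typically pair the allocators with deallocate(), e.g.
+ *
+ *	struct area_spec area;
+ *
+ *	if (allocate_2d_area(64, 32, ALIGN_32, &area) == TilerErrorNone) {
+ *		... use area.x0/y0 .. area.x1/y1 ...
+ *		deallocate(&area);
+ *	}
+ *
+ * ALIGN_32 is assumed here to be one of the 'enum alignment' values
+ * accepted by ALIGN_STRIDE(); ALIGN_16 is rejected above.
+ */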
+
+/**
+ * @description: Deallocate 2d or 1D allocations if previously allocated
+ *
+ * @input:'to_be_removed_area' specification: for 2D this should contain
+ * TL Corner and BR Corner of the 2D area, or for 1D allocation this should
+ * contain the start and end Tiles
+ *
+ * @return 0 on success, non-0 error value on failure. On success
+ * the to_be_removed_area is removed from g_allocation_list and the
+ * corresponding tiles are marked 'NOT_OCCUPIED'
+ *
+ */
+s32 deallocate(struct area_spec *to_be_removed_area)
+{
+ s32 ret = TilerErrorNone;
+ struct tiler_page reset_tile = { NOT_OCCUPIED, {0, 0, 0, 0}, 0, 0};
+ u16 area_type;
+
+ MUTEX_LOCK(&g_mutex);
+ /* First we check if the given area is at least valid in our list */
+ ret = rem_element_with_match(&g_allocation_list, to_be_removed_area,
+ &area_type);
+
+ /* If we found a positive match & removed the area details from list
+ * then we clear the contents of the associated tiles in the global
+ * container*/
+ if (ret == TilerErrorNone) {
+ if (area_type == TWO_D) {
+ P1("De-allocating TWO_D allocation %s\n",
+ AREA_STR(a_str, to_be_removed_area));
+ /* Reset tiles are inserted, deliberately ignoring
+ ret values */
+ ret = insert_area_with_tiler_page(
+ to_be_removed_area, reset_tile);
+ } else {
+ P1("De-allocating ONE_D allocation %s\n",
+ AREA_STR(a_str, to_be_removed_area));
+ ret = insert_pages_with_tiler_page(to_be_removed_area,
+ reset_tile);
+ }
+ } else {
+ PE("Did not find a Match to remove\n");
+ }
+
+ MUTEX_REL(&g_mutex);
+ return ret;
+
+}
+
+
+
+/**
+ * @description: raster scan right to left from top to bottom; find if there is
+ * a free area to fit a given w x h inside the 'scan area'. If there is a free
+ * area, it is added to the short_listed candidates, which are later sent for
+ * selection as per pre-defined criteria.
+ *
+ * @input:'w x h' width and height of the allocation area.
+ * 'stride' - 64/32/None for start address alignment
+ * 'scan_area' - area in which the scan operation should take place
+ *
+ * @return 0 on success, non-0 error value on failure. On success
+ * the 'alloc_area' area contains TL and BR corners of the allocated area
+ *
+ */
+s32 scan_r2l_t2b(u16 w, u16 h, u16 stride, struct area_spec *scan_area,
+ struct area_spec *alloc_area)
+{
+
+ s32 xx = 0, yy = 0;
+ s16 start_x = -1, end_x = -1, start_y = -1, end_y = -1;
+ s16 found_x = -1, found_y = -1;
+ u16 remainder;
+ struct area_spec_list *short_listed = NULL;
+ struct area_spec candidate_area = {0, 0, 0, 0};
+ u16 num_short_listed = 0;
+ s32 ret = TilerErrorNone;
+
+ P2("Scanning From Right 2 Left Top 2 Bottom: ScanArea: %s\n",
+ AREA_STR(a_str, scan_area));
+
+ /*Basic checks */
+ if (scan_area == NULL || alloc_area == NULL) {
+ PE("Null value found\n");
+ return TilerErrorInvalidArg;
+ }
+
+ /*Check if the scan area co-ordinates are valid */
+ if ((scan_area->x0 < scan_area->x1) ||
+ (scan_area->y1 < scan_area->y0)) {
+ PE("Invalid scan area: %s\n", AREA_STR(a_str, scan_area));
+ return TilerErrorInvalidScanArea;
+ }
+
+ start_x = scan_area->x0;
+ end_x = scan_area->x1;
+ start_y = scan_area->y0;
+ end_y = scan_area->y1;
+
+ /* Check if we have a scan area bigger than the given input width and
+ height */
+ if (w > INCL_LEN(start_x, end_x) || h > INCL_LEN(end_y, start_y)) {
+ PE("Scan area smaller than width and height\n");
+ return TilerErrorInvalidDimension;
+ }
+
+ /* Adjust start_x and end_y; no point scanning beyond a point where we
+ can't allocate the given w x h area */
+ start_x = start_x - w + 1; /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+
+ /* calculating remainder */
+ remainder = (start_x % stride);
+ /* P("remainder = %d\n",remainder); */
+
+ /* if start_x is not divisible by stride, then skip to PREV aligned
+ column */
+ start_x -= remainder ? (remainder) : 0 ;
+ /* P("StartX = %d\n",start_x); */
+
+
+ /* check if we have enough width to accommodate the request from the
+ aligned (start_x) column */
+ if (start_x < end_x) {
+ PE("No room to allocate at aligned lengths\n");
+ return TilerErrorNoRoom;
+ }
+
+ P2("Final stride : %d\n, start_x : %d end_x : %d start_y :\
+ %d end_y : %d\n", stride, start_x, end_x, start_y,
+ end_y);
+
+ /* Start scanning: These scans are always inclusive ones so if we are
+ given a start x = 0 is a valid value so if we have a end_x = 255,
+ 255th element is also checked
+ */
+ for (yy = start_y; yy <= end_y; ++yy) {
+ for (xx = start_x; xx >= end_x; xx -= stride) {
+ if (g_area_container[xx][yy].is_occupied ==
+ NOT_OCCUPIED) {
+ if (FIT == check_fit_r_and_b(w, h, xx, yy)) {
+ P3("Found Free Shoulder at:\
+ (%d, %d)\n", xx, yy);
+ found_x = xx;
+ found_y = yy;
+ /* Insert this candidate, it is just a
+ co-ordinate, reusing Area */
+ assign(&candidate_area, xx, yy, 0, 0);
+ insert_element(&short_listed,
+ &candidate_area, TWO_D);
+ num_short_listed++;
+ /*changing upper bound on x direction */
+#ifdef X_SCAN_LIMITER
+ end_x = xx + 1;
+#endif
+ break;
+ }
+ } else {
+ /* Optimization required only for Non Aligned,
+ Aligned anyways skip by 32/64 tiles at a time */
+ if (stride == 1 &&
+ g_area_container[xx][yy].type ==
+ TWO_D) {
+ xx = g_area_container
+ [xx][yy].parent_area.x0;
+ P3("Moving to parent location start_x\
+ (%d %d)\n", xx, yy);
+ }
+ }
+
+
+ }
+
+ /* if we find a free area shouldering the start of the scan area,
+ then we can break
+ */
+#ifdef Y_SCAN_LIMITER
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+
+ if (!short_listed) {
+ PE("No candidates found in a given scan area\n");
+ return TilerErrorNoRoom;
+ }
+
+ /*Now if we are here it implies we have potential candidates*/
+ ret = select_candidate(w, h, num_short_listed, short_listed, scan_area,
+ g_scan_criteria_r2l_t2b, alloc_area);
+
+ if (ret != TilerErrorNone)
+ PE("Error in Selecting a Candidate\n");
+ /*Just clean up resources */
+ clean_list(&short_listed);
+
+ /* dump_list_entries(short_listed); */
+ return ret;
+
+}
+
+
+/**
+ * @description: raster scan right to left from bottom to top; find if there is
+ * a free area to fit a given w x h inside the 'scan area'. If there is a free
+ * area, it is added to the short_listed candidates, which are later sent for
+ * selection as per pre-defined criteria.
+ *
+ * @input:'w x h' width and height of the allocation area.
+ * 'stride' - 64/32/None for start address alignment
+ * 'scan_area' - area in which the scan operation should take place
+ *
+ * @return 0 on success, non-0 error value on failure. On success
+ * the 'alloc_area' area contains TL and BR corners of the allocated area
+ *
+ */
+s32 scan_r2l_b2t(u16 w, u16 h, u16 stride, struct area_spec *scan_area,
+ struct area_spec *alloc_area)
+{
+
+ /* TO DO: Should i check scan area?
+ Might have to take it as input during initialization
+ */
+ s32 xx = 0, yy = 0;
+ s16 start_x = -1, end_x = -1, start_y = -1, end_y = -1;
+ s16 found_x = -1, found_y = -1;
+ u16 remainder;
+ struct area_spec_list *short_listed = NULL;
+ struct area_spec candidate_area = {0, 0, 0, 0};
+ u16 num_short_listed = 0;
+ s32 ret = TilerErrorNone;
+
+ P2("Scanning From Right 2 Left Bottom 2 Top, ScanArea: %s\n",
+ AREA_STR(a_str, scan_area));
+ /* Basic checks */
+ if (scan_area == NULL || alloc_area == NULL) {
+ PE("Null value found\n");
+ return TilerErrorInvalidArg;
+ }
+
+ /* Check if the scan area co-ordinates are valid */
+ if ((scan_area->x1 < scan_area->x0) ||
+ (scan_area->y1 < scan_area->y0)) {
+ PE("Invalid scan area: %s\n", AREA_STR(a_str, scan_area));
+ return TilerErrorInvalidScanArea;
+ }
+
+ start_x = scan_area->x0;
+ end_x = scan_area->x1;
+ start_y = scan_area->y0;
+ end_y = scan_area->y1;
+
+ /* Check if we have a scan area bigger than the given input width
+ and height */
+ if (w > INCL_LEN(start_x, end_x) || h > INCL_LEN(start_y, end_y)) {
+ PE("Scan area smaller than width and height\n");
+ return TilerErrorInvalidDimension;
+ }
+
+ /* Adjust start_x and start_y; no point scanning beyond a point where
+ we can't allocate the given w x h area */
+ start_x = start_x - w + 1; /* + 1 to be inclusive */
+ start_y = start_y - h + 1;
+
+
+ /* calculating remainder */
+ remainder = (start_x % stride);
+ /* P("remainder = %d\n",remainder); */
+
+ /* if start_x is not divisible by stride, then skip to PREV aligned
+ column */
+ start_x -= remainder ? (remainder) : 0 ;
+ /* P("StartX = %d\n",start_x); */
+
+
+ /* check if we have enough width to accommodate the request from the
+ aligned (start_x) column */
+ if (start_x < end_x) {
+ PE("No room to allocate at aligned lengths\n");
+ return TilerErrorNoRoom;
+ }
+
+ P2("Final stride : %d\n, start_x : %d end_x : %d start_y : %d end_y :\
+ %d\n", stride, start_x, end_x, start_y, end_y);
+
+ /* Start scanning: These scans are always inclusive ones so if we are
+ given a start x = 0 is a valid value so if we have a end_x = 255,
+ 255th element is also checked
+ */
+ for (yy = start_y; yy >= end_y; --yy) {
+ for (xx = start_x; xx >= end_x; xx -= stride) {
+ if (!g_area_container[xx][yy].is_occupied) {
+ if (check_fit_r_and_b(w, h, xx, yy) == FIT) {
+ P3("Found Free Shoulder at: (%d, %d)\n",
+ xx, yy);
+ found_x = xx;
+ found_y = yy;
+ /* Insert this candidate, it is just a
+ co-ordinate, reusing Area */
+ assign(&candidate_area, xx, yy, 0, 0);
+ insert_element(&short_listed,
+ &candidate_area, TWO_D);
+ num_short_listed++;
+ /*changing upper bound on x direction */
+#ifdef X_SCAN_LIMITER
+ end_x = xx + 1;
+#endif
+ break;
+ }
+ } else {
+ /* Optimization required only for Non Aligned,
+ Aligned anyways skip by 32/64 tiles at a time */
+ if (stride == 1 && g_area_container
+ [xx][yy].type == TWO_D) {
+ xx = g_area_container
+ [xx][yy].parent_area.x0;
+ P3("Moving to parent location start_x\
+ (%d %d)\n", xx, yy);
+ }
+ }
+
+ }
+
+ /* if we find a free area shouldering the start of the scan area,
+ then we can break
+ */
+#ifdef Y_SCAN_LIMITER
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+
+ if (!short_listed) {
+ PE("No candidates found in a given scan area\n");
+ return TilerErrorNoRoom;
+ }
+
+ /*Now if we are here it implies we have potential candidates*/
+ ret = select_candidate(w, h, num_short_listed, short_listed, scan_area,
+ g_scan_criteria_r2l_b2t, alloc_area);
+
+ if (ret != TilerErrorNone)
+ PE("Error in Selecting a Candidate\n");
+ /*Just clean up resources */
+ clean_list(&short_listed);
+
+ /* dump_list_entries(short_listed); */
+ return ret;
+
+}
+
+
+
+/**
+ * @description: raster scan left to right from top to bottom; find if there is
+ * a free area to fit a given w x h inside the 'scan area'. If there is a free
+ * area, it is added to the short_listed candidates, which are later sent for
+ * selection as per pre-defined criteria.
+ *
+ * @input:'w x h' width and height of the allocation area.
+ * 'stride' - 64/32/None for start address alignment
+ * 'scan_area' - area in which the scan operation should take place
+ *
+ * @return 0 on success, non-0 error value on failure. On success
+ * the 'alloc_area' area contains TL and BR corners of the allocated area
+ *
+ */
+
+s32 scan_l2r_t2b(u16 w, u16 h, u16 stride, struct area_spec *scan_area,
+ struct area_spec *alloc_area)
+{
+ s32 xx = 0, yy = 0;
+ s16 start_x = -1, end_x = -1, start_y = -1, end_y = -1;
+ s16 found_x = -1, found_y = -1;
+ u16 remainder;
+ struct area_spec_list *short_listed = NULL;
+ struct area_spec candidate_area = {0, 0, 0, 0};
+ u16 num_short_listed = 0;
+ s32 ret = TilerErrorNone;
+
+ P2("Scanning From Left 2 Right Top 2 Bottom, ScanArea: %s\n",
+ AREA_STR(a_str, scan_area));
+
+ /* Basic checks */
+ if (scan_area == NULL || alloc_area == NULL) {
+ PE("Null value found\n");
+ return TilerErrorInvalidArg;
+ }
+
+ start_x = scan_area->x0;
+ end_x = scan_area->x1;
+ start_y = scan_area->y0;
+ end_y = scan_area->y1;
+
+ /*Check if the scan area co-ordinates are valid */
+ if ((scan_area->x1 < scan_area->x0) ||
+ (scan_area->y1 < scan_area->y0)) {
+ PE("Invalid scan area: %s\n", AREA_STR(a_str, scan_area));
+ return TilerErrorInvalidScanArea;
+ }
+
+ /* Check if we have a scan area bigger than the given input width and
+ height */
+ if (w > INCL_LEN(end_x, start_x) || h > INCL_LEN(end_y, start_y)) {
+ PE("Scan area smaller than width and height\n");
+ return TilerErrorInvalidDimension;
+ }
+
+ /* calculating remainder */
+ remainder = (start_x % stride);
+
+ /* if start_x is not divisible by stride, then skip to next aligned
+ column */
+ start_x += remainder ? (stride - remainder) : 0 ;
+
+ /* check if we have enough width to accommodate the request from the
+ aligned (start_x) column */
+ if (w > INCL_LEN(end_x, start_x)) {
+ PE("No room to allocate at aligned lengths\n");
+ return TilerErrorNoRoom;
+ }
+
+ /* Adjust end_x and end_y; no point scanning beyond a point where we
+ can't allocate the given w x h area */
+ end_x = end_x - w + 1; /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* Just updating start and ends */
+
+ /* P(" stride : %d\n, start_x : %d end_x : %d start_y : %d end_y : %d\n"
+ ,stride, start_x,end_x,start_y,end_y);*/
+
+ /* Start scanning: These scans are always inclusive ones so if we are
+ given a start x = 0 is a valid value so if we have a end_x = 255,
+ 255th element is also checked
+ */
+ for (yy = start_y; yy <= end_y; ++yy) {
+ for (xx = start_x; xx <= end_x; xx += stride) {
+ /* if NOT occupied */
+ if (g_area_container[xx][yy].is_occupied ==
+ NOT_OCCUPIED) {
+ if (FIT == check_fit_r_and_b(w, h, xx, yy)) {
+ P3("Found Free Shoulder at: (%d, %d)\n",
+ xx, yy);
+ found_x = xx;
+ found_y = yy;
+ /* Insert this candidate, it is just a
+ co-ordinate, reusing Area */
+ assign(&candidate_area, xx, yy, 0, 0);
+ insert_element(&short_listed,
+ &candidate_area, TWO_D);
+ num_short_listed++;
+ /*changing upper bound on x direction */
+#ifdef X_SCAN_LIMITER
+ end_x = xx - 1;
+#endif
+ break;
+ }
+ } else {
+ /* Optimization required only for Non Aligned,
+ Aligned anyways skip by 32/64 tiles at a time */
+ if (stride == 1 && g_area_container
+ [xx][yy].type == TWO_D) {
+ xx = g_area_container
+ [xx][yy].parent_area.x1;
+ P3("Moving to parent location end_x\
+ (%d %d)\n", xx, yy);
+ }
+ }
+ }
+ /* if we find a free area shouldering the start of the scan area,
+ then we can break
+ */
+#ifdef Y_SCAN_LIMITER
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+ if (!short_listed) {
+ PE("No candidates found in a given scan area\n");
+ return TilerErrorNoRoom;
+ }
+
+ /*Now if we are here it implies we have potential candidates*/
+ ret = select_candidate(w, h, num_short_listed, short_listed, scan_area,
+ g_scan_criteria_l2r_t2b, alloc_area);
+
+ if (ret != TilerErrorNone)
+ PE("Error in Selecting a Candidate\n");
+ /*Just clean up resources */
+ clean_list(&short_listed);
+
+ /* dump_list_entries(short_listed); */
+ return ret;
+}
+
+/**
+ * @description: raster scan left to right from bottom to top; find if there is
+ * a free area to fit a given w x h inside the 'scan area'. If there is a free
+ * area, it is added to the short_listed candidates, which are later sent for
+ * selection as per pre-defined criteria.
+ *
+ * @input:'w x h' width and height of the allocation area.
+ * 'stride' - 64/32/None for start address alignment
+ * 'scan_area' - area in which the scan operation should take place
+ *
+ * @return 0 on success, non-0 error value on failure. On success
+ * the 'alloc_area' area contains TL and BR corners of the allocated area
+ *
+ */
+s32 scan_l2r_b2t(u16 w, u16 h, u16 stride, struct area_spec *scan_area,
+ struct area_spec *alloc_area)
+{
+ s32 xx = 0, yy = 0;
+ s16 start_x = -1, end_x = -1, start_y = -1, end_y = -1;
+ s16 found_x = -1, found_y = -1;
+ u16 remainder;
+ struct area_spec_list *short_listed = NULL;
+ struct area_spec candidate_area = {0, 0, 0, 0};
+ u16 num_short_listed = 0;
+ s32 ret = TilerErrorNone;
+
+ P2("Scanning From Left 2 Right Bottom 2 Top, ScanArea: %s\n",
+ AREA_STR(a_str, scan_area));
+
+ /* Basic checks */
+ if (scan_area == NULL || alloc_area == NULL) {
+ PE("Null value found\n");
+ return TilerErrorInvalidArg;
+ }
+
+ start_x = scan_area->x0;
+ end_x = scan_area->x1;
+ start_y = scan_area->y0;
+ end_y = scan_area->y1;
+
+ /*Check if the scan area co-ordinates are valid */
+ if ((scan_area->x1 < scan_area->x0) ||
+ (scan_area->y0 < scan_area->y1)) {
+ PE("Invalid scan area: %s\n", AREA_STR(a_str, scan_area));
+ return TilerErrorInvalidScanArea;
+ }
+
+ /* Check if we have a scan area bigger than the given input width and
+ height */
+ if (w > INCL_LEN(end_x, start_x) || h > INCL_LEN(start_y, end_y)) {
+ PE("Scan area smaller than width and height\n");
+ return TilerErrorInvalidDimension;
+ }
+
+ /* calculating remainder */
+ remainder = (start_x % stride);
+
+ /* if start_x is not divisible by stride, then skip to next aligned
+ column */
+ start_x += remainder ? (stride - remainder) : 0 ;
+
+ /* check if we have enough width to accommodate the request from the
+ aligned (start_x) column */
+ if (w > INCL_LEN(end_x, start_x)) {
+ PE("No room to allocate at aligned lengths\n");
+ return TilerErrorNoRoom;
+ }
+
+ /* Adjust end_x and start_y; no point scanning beyond a point where
+ we can't allocate the given w x h area */
+ end_x = end_x - w + 1; /* + 1 to be inclusive */
+ start_y = start_y - h + 1;
+
+ /* Just updating start and ends */
+
+ P2(" stride : %d\n, start_x : %d end_x : %d start_y : %d end_y : %d\n",
+ stride, start_x, end_x, start_y, end_y);
+
+ /* Start scanning: These scans are always inclusive ones so if we are
+ given a start x = 0 is a valid value so if we have a end_x = 255,
+ 255th element is also checked
+ */
+ for (yy = start_y; yy >= end_y; --yy) {
+ for (xx = start_x; xx <= end_x; xx += stride) {
+ /* if NOT occupied */
+ if (!g_area_container[xx][yy].is_occupied) {
+ if (check_fit_r_and_b(w, h, xx, yy) == FIT) {
+ P3("Found Free Shoulder at: (%d, %d)\n",
+ xx, yy);
+ found_x = xx;
+ found_y = yy;
+ /* Insert this candidate, it is just a
+ co-ordinate, reusing Area */
+ assign(&candidate_area, xx, yy, 0, 0);
+ insert_element(&short_listed,
+ &candidate_area, TWO_D);
+ num_short_listed++;
+ /*changing upper bound on x direction */
+#ifdef X_SCAN_LIMITER
+ end_x = xx - 1;
+#endif
+ break;
+ }
+ } else {
+ /* Optimization required only for Non Aligned,
+ Aligned anyways skip by 32/64 tiles at a time */
+ if (stride == 1 && g_area_container
+ [xx][yy].type == TWO_D) {
+ xx = g_area_container
+ [xx][yy].parent_area.x1;
+ P3("Moving to parent location end_x\
+ (%d %d)\n", xx, yy);
+ }
+ }
+ }
+
+ /* if we find a free area shouldering the start of the scan area,
+ then we can break
+ */
+#ifdef Y_SCAN_LIMITER
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+ if (!short_listed) {
+ PE("No candidates found in a given scan area\n");
+ return TilerErrorNoRoom;
+ }
+
+ /*Now if we are here it implies we have potential candidates*/
+ ret = select_candidate(w, h, num_short_listed, short_listed, scan_area,
+ g_scan_criteria_l2r_b2t, alloc_area);
+
+ if (ret != TilerErrorNone)
+ PE("Error in Selecting a Candidate\n");
+ /*Just clean up resources */
+ clean_list(&short_listed);
+
+ /* dump_list_entries(short_listed); */
+ return ret;
+}
+
+/*
+Note: In general, the co-ordinates specified in a scan area are relative to the
+scan sweep direction, i.e. a scan area from the top-left corner will have x0 <= x1
+and y0 <= y1, whereas a scan area from the bottom-right corner will have x1 <= x0
+and y1 <= y0.
+*/
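+
+/*
+ * Example (illustration only): a full-container scan from the top-left
+ * corner would be specified as {0, 0, MAX_X_DIMMENSION - 1,
+ * MAX_Y_DIMMENSION - 1}, while the same area for a scan starting at the
+ * bottom-right corner is {MAX_X_DIMMENSION - 1, MAX_Y_DIMMENSION - 1, 0, 0},
+ * matching the assign() calls used elsewhere in this file.
+ */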
+
+/**
+ * @description: raster scan right to left from bottom to top; find if there are
+ * continuous free pages(one tile is one page, continuity always from left to
+ * right) inside the 'scan area'. If there are enough continous free pages,
+ * then it returns the start and end Tile/page co-ordinates inside 'alloc_area'
+ *
+ * @input:'num_of_pages' required,
+ * 'scan_area' - area in which the scan operation should take place
+ *
+ * @return 0 on success, non-0 error value on failure. On success
+ * the 'alloc_area' area contains start and end tile (inclusive).
+ *
+ */
+s32 scan_r2l_b2t_one_dim(u32 num_of_pages, struct area_spec *scan_area,
+ struct area_spec *alloc_area)
+{
+ u16 x, y;
+ u16 left_x, left_y, busy_x, busy_y;
+ s32 ret = TilerErrorNone;
+ s32 fit = NO_FIT;
+
+ /*Basic checks */
+ if (scan_area == NULL || alloc_area == NULL) {
+ PE("Null arguments found\n");
+ return TilerErrorInvalidArg;
+ }
+
+ if (scan_area->y0 < scan_area->y1) {
+ PE("Invalid scan area: %s\n", AREA_STR(a_str, scan_area));
+ return TilerErrorInvalidScanArea;
+ }
+
+ P2("Scanning From Right 2 Left Bottom 2 Top for 1D: ScanArea: %s\n",
+ AREA_STR(a_str, scan_area));
+
+ /* Note: checking sanctity of the scan area.
+ * The reason for checking this is that 1D allocations assume the X
+ range spans the entire tiler space, i.e. ALL columns.
+ * The scan area can limit only the Y, i.e. the number of rows for a 1D
+ allocation. We also expect we could have only 1 row for a 1D allocation,
+ * i.e. our scan_area y0 and y1 may have the same value.
+ */
+
+ /*
+ Thinking Aloud: Should i even worry about width (X dimension)??
+ Can't i just ignore X and then take the u16 (Y dimension) and assume X
+ to range from MAX_X_DIMEN to 0 ??
+ */
+ if (MAX_X_DIMMENSION != (1 + (scan_area->x0 - scan_area->x1))) {
+ PE("Not a valid Scan Area, for 1D the width should be entire\
+ Tiler u16 (%d) but it is (%d)\n",
+ MAX_X_DIMMENSION, 1 + (scan_area->x0 - scan_area->x1));
+ return TilerErrorInvalidDimension;
+ }
+
+
+ /* checking if the scan area can accommodate the num_of_pages */
+ if (num_of_pages > MAX_X_DIMMENSION * INCL_LEN(scan_area->y0,
+ scan_area->y1)) {
+ PE("Num of Pages exceed Max possible (%d) for a given scan area\
+ %s\n", MAX_X_DIMMENSION *\
+ (scan_area->y0 - scan_area->y1),
+ AREA_STR(a_str, scan_area));
+ return TilerErrorNoRoom;
+ }
+
+ /* Ah we are here, it implies we can try fitting now after we have
+ checked everything */
+ left_x = scan_area->x0;
+ left_y = scan_area->y0;
+ while (ret == TilerErrorNone) {
+ x = left_x;
+ y = left_y;
+
+ /* P("Checking if (%d %d) is Occupied\n",x,y); */
+ if (g_area_container[x][y].is_occupied == NOT_OCCUPIED) {
+ ret = move_left(x, y, (num_of_pages - 1),
+ &left_x, &left_y);
+ if (ret == TilerErrorNone) {
+ P3("Moved left to (%d %d) for num_of_pages (%d)\
+ ,checking for fit\n", left_x,
+ left_y, (num_of_pages - 1));
+ fit = check_fit_r_one_dim(left_x, left_y,
+ num_of_pages, &busy_x, &busy_y);
+ if (fit == FIT) {
+ /*Implies we are fine, we found a place
+ to put our 1D alloc */
+ assign(alloc_area, left_x, left_y,
+ busy_x, busy_y);
+ P3("Allocated 1D area: %s\n",
+ AREA_STR(a_str, alloc_area));
+ break;
+ } else {
+ /* Implies it did not fit, the busy_x,
+ busy_y will now be pointing to the Tile
+ that was found busy/Occupied */
+ x = busy_x;
+ y = busy_y;
+ }
+ } else {
+ PE("Error in Moving left: Error Code %d,\
+ Breaking....\n", ret);
+ break;
+ }
+ }
+
+ /* Ah, if we are here then the tile is occupied, now we might
+ move to the start of the parent location */
+ if (g_area_container[x][y].type == ONE_D) {
+ busy_x = g_area_container[x][y].parent_area.x0;
+ busy_y = g_area_container[x][y].parent_area.y0;
+ } else {
+ busy_x = g_area_container[x][y].parent_area.x0;
+ busy_y = y;
+ }
+ x = busy_x;
+ y = busy_y;
+
+ P3("Busy Tile found moving to ParentArea start :\
+ (%d %d)\n", x, y);
+ ret = move_left(x, y, 1, &left_x, &left_y);
+ }
+
+
+ if (!fit)
+ ret = TilerErrorNoRoom;
+
+ return ret;
+}
+
+
+
+/**
+ * @description: Scans the container for a free w x h area, choosing the
+ * scan direction and initial boundary from the alignment stride (64, 32
+ * or none) and the 64/32 divider line, and falling back to scanning the
+ * entire container if the preferred region has no room.
+ *
+ * @input: 'w x h' width and height of the allocation area.
+ * 'stride' - 64/32/None for start address alignment
+ *
+ * @return 0 on success, non-0 error value on failure. On success
+ * 'allocated_area' contains TL and BR corners of the allocated area.
+ *
+ */
+s32 scan_areas_and_find_fit(u16 w, u16 h, u16 stride,
+ struct area_spec *allocated_area)
+{
+ /* Checking for input arguments */
+ /* No need to do this check, we have checked it in the parent call */
+ struct area_spec scan_area = {0, 0, 0, 0};
+ s32 ret = TilerErrorGeneral;
+ u16 boundary_x = 0, boundary_y = 0;
+ s32 need_scan_flag = 2;
+
+ if (stride == 64) {
+ boundary_x = g_div_ln_btw_64_and_32_align.x1 - 1;
+ boundary_y = g_div_ln_btw_64_and_32_align.y1 - 1;
+
+ /* more intelligence here */
+ if (w > g_div_ln_btw_64_and_32_align.x1) {
+ boundary_x = MAX_X_DIMMENSION - 1;
+ --need_scan_flag;
+ }
+ if (h > g_div_ln_btw_64_and_32_align.y1) {
+ boundary_y = MAX_Y_DIMMENSION - 1;
+ --need_scan_flag;
+ }
+
+ assign(&scan_area, 0, 0, boundary_x, boundary_y);
+ ret = scan_l2r_t2b(w, h, stride, &scan_area, allocated_area);
+
+ if (ret != TilerErrorNone && need_scan_flag) {
+ /*Fall back Scan the entire Tiler area*/
+ assign(&scan_area, 0, 0, MAX_X_DIMMENSION - 1,
+ MAX_Y_DIMMENSION - 1);
+ ret = scan_l2r_t2b(w, h, stride, &scan_area,
+ allocated_area);
+ }
+ } else if (stride == 32) {
+
+ boundary_x = g_div_ln_btw_64_and_32_align.x1;
+ boundary_y = g_div_ln_btw_64_and_32_align.y1-1;
+
+ /* more intelligence here */
+ if (w > (MAX_X_DIMMENSION - g_div_ln_btw_64_and_32_align.x1)) {
+ boundary_x = 0;
+ --need_scan_flag;
+ }
+ if (h > g_div_ln_btw_64_and_32_align.y1) {
+ boundary_y = MAX_Y_DIMMENSION - 1;
+ --need_scan_flag;
+ }
+
+ assign(&scan_area, MAX_X_DIMMENSION - 1, 0, boundary_x,
+ boundary_y);
+ ret = scan_r2l_t2b(w, h, stride, &scan_area, allocated_area);
+
+ if (ret != TilerErrorNone && need_scan_flag) {
+ /*Fall back Scan the entire Tiler area*/
+ assign(&scan_area, MAX_X_DIMMENSION - 1, 0, 0,
+ MAX_Y_DIMMENSION - 1);
+ ret = scan_r2l_t2b(w, h, stride, &scan_area,
+ allocated_area);
+ }
+ } else if (stride == 1) {
+
+ /* The reason we use the 64-aligned area is that we don't want to
+ grow down and reduce the 1D space */
+ if (h > g_div_ln_btw_64_and_32_align.y1) {
+ need_scan_flag -= 2;
+ assign(&scan_area, 0, 0, MAX_X_DIMMENSION - 1,
+ MAX_Y_DIMMENSION - 1);
+ ret = scan_l2r_t2b(w, h, stride, &scan_area,
+ allocated_area);
+ } else {
+ assign(&scan_area, 0,
+ g_div_ln_btw_64_and_32_align.y1 - 1,
+ MAX_X_DIMMENSION - 1, 0);
+ /* Scans up in the 64 and 32 areas across the whole width */
+ ret = scan_l2r_b2t(w, h, stride, &scan_area,
+ allocated_area);
+ }
+
+ if (ret != TilerErrorNone && need_scan_flag) {
+ assign(&scan_area, 0, 0, MAX_X_DIMMENSION - 1,
+ MAX_Y_DIMMENSION - 1);
+ ret = scan_l2r_t2b(w, h, stride, &scan_area,
+ allocated_area);
+ }
+ }
+
+ return ret;
+
+}
+
+
+s32 check_fit_r_and_b(u16 w, u16 h, u16 left_x, u16 top_y)
+{
+ u16 xx = 0, yy = 0;
+ s32 ret = FIT;
+ for (yy = top_y; yy < top_y+h; ++yy) {
+ for (xx = left_x; xx < left_x+w; ++xx) {
+ /*P("Checking Occ: (%d %d) - %d\n",xx,yy,
+ g_area_container[xx][yy].is_occupied); */
+ if (g_area_container[xx][yy].is_occupied == OCCUPIED) {
+ ret = NO_FIT;
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+s32 check_fit_r_one_dim(u16 x, u16 y, u32 num_of_pages, u16 *busy_x,
+ u16 *busy_y)
+{
+ s32 fit = FIT;
+ s32 ret = TilerErrorNone;
+ s32 i = 0;
+
+ *busy_x = x;
+ *busy_y = y;
+
+ P2("checking Fit for (%d) pages from (%d %d)\n ", num_of_pages, x, y);
+ while (i < num_of_pages) {
+ /* P("Checking if occupied (%d %d)\n",x, y); */
+ if (g_area_container[x][y].is_occupied) {
+ /* Oh, the tile is occupied, so we break and signal that
+ we encountered a BUSY tile */
+ fit = NO_FIT;
+
+ /* Now going to the start of the parent allocation
+ so we avoid unnecessary checking */
+
+ if (g_area_container[x][y].type == ONE_D) {
+ *busy_x = g_area_container[x][y].parent_area.x0;
+ *busy_y = g_area_container[x][y].parent_area.y0;
+ } else {
+ *busy_x = g_area_container[x][y].parent_area.x0;
+ *busy_y = y;
+ }
+ /* To Do: */
+ /*Could also move left in case of TWO_D*/
+
+ P2("Busy Tile found moving to ParentArea start :\
+ (%d %d)\n", *busy_x, *busy_y);
+ break;
+ }
+
+ i++;
+ /* Sorry for this ugly code, i had to break before moving right
+ unnecessarily */
+ /* This also helps my dual purpose busy_x and busy_y with the
+ end co-ordinates of 1D allocations*/
+ if (i == num_of_pages)
+ break;
+
+ ret = move_right(x, y, 1, busy_x, busy_y);
+ if (ret != TilerErrorNone) {
+ PE("Error in Moving Right.. Breaking...\n");
+ fit = NO_FIT;
+ break;
+ } else {
+ x = *busy_x;
+ y = *busy_y;
+ }
+ }
+
+ return fit;
+}
+
+
+
+
+s32 insert_area_with_tiler_page(struct area_spec *area, struct tiler_page tile)
+{
+ s32 x, y;
+ if (area->x0 < 0 || area->x1 >= MAX_X_DIMMENSION || area->y0 < 0 ||
+ area->y1 >= MAX_Y_DIMMENSION) {
+ PE("Invalid dimensions\n");
+ return TilerErrorInvalidDimension;
+ }
+ P2("Inserting Tiler Page at (%d %d) (%d %d)\n", area->x0, area->y0,
+ area->x1, area->y1);
+
+ /*If you are here: basic checks are done */
+ for (x = area->x0; x <= area->x1; ++x)
+ for (y = area->y0; y <= area->y1; ++y)
+ g_area_container[x][y] = tile;
+ return TilerErrorNone;
+}
+
+
+s32 insert_pages_with_tiler_page(struct area_spec *area, struct tiler_page tile)
+{
+
+ u16 x = 0, y = 0;
+ u16 right_x = 0, right_y = 0;
+ s32 ret = TilerErrorNone;
+ if (area == NULL) {
+ PE("Null input received\n");
+ return TilerErrorInvalidArg;
+ }
+
+ /*Note: By the way I expect Pages specified from Right to Left */
+ /* TO DO:
+ *Do i really need to check integrity of area specs?
+ */
+
+ P2("Inserting Tiler Pages from (%d %d) to (%d %d)\n", area->x0,
+ area->y0, area->x1, area->y1);
+
+
+ x = area->x0;
+ y = area->y0;
+ while (!(x == area->x1 && y == area->y1)) {
+ /* P("(%d %d)\n",x,y); */
+ g_area_container[x][y] = tile;
+ ret = move_right(x, y, 1, &right_x, &right_y);
+ if (ret == TilerErrorNone) {
+ x = right_x;
+ y = right_y;
+ } else {
+ PE("Error in Moving right\n");
+ return ret;
+ }
+ }
+ /*Of course since this is inclusive we need to set the last tile too */
+ g_area_container[x][y] = tile;
+ return TilerErrorNone;
+}
+
+static s32 select_candidate(IN u16 w, IN u16 h, IN u16 num_short_listed,
+ IN struct area_spec_list *short_listed, IN struct area_spec *scan_area,
+ IN s32 criteria, OUT struct area_spec *alloc_area)
+{
+ /* book keeping the winner */
+ struct area_spec_list *win_candidate = NULL;
+ struct nearness_factor win_near_factor = {0.0, 0.0};
+ struct neighbour_stats win_neigh_stats = {0, 0, 0, 0, 0, 0, 0, 0};
+ u16 win_total_neighs = 0;
+
+ /*book keeping the current one being evaluated */
+ struct area_spec_list *cur_candidate = NULL;
+ struct area_spec *cur_area = NULL;
+ struct nearness_factor cur_near_factor = {0.0, 0.0};
+ struct neighbour_stats cur_neigh_stats = {0, 0, 0, 0, 0, 0, 0, 0};
+ u16 cur_total_neighs = 0;
+
+ /*Should i swap_flag? */
+ u8 swap_flag = NO;
+
+ /* I am sure that Alloc Area == NULL is checked earlier, but still
+ checking */
+ if (alloc_area == NULL || scan_area == NULL) {
+ PE("NULL input found\n");
+ return TilerErrorInvalidArg;
+ }
+
+ /*if there is only one candidate then that is the selection*/
+ if (num_short_listed == 1) {
+ /* Note: Sure we could have done this in the previous function,
+ but just wanted this to be cleaner so having
+ * one place where the selection is made. Here I am returning
+ the first one
+ */
+ assign(alloc_area, short_listed->area.x0, short_listed->area.y0,
+ short_listed->area.x0 + w - 1, short_listed->area.y0 + h - 1);
+ return TilerErrorNone;
+ }
+
+ /* If first found is enabled then we just provide bluntly the first
+ found candidate
+ * NOTE: For Horizontal bias we just give the first found, because our
+ * scan is Horizontal raster based and the first candidate will always
+ * be the same as if selecting the Horizontal one.
+ */
+ if (criteria & CR_FIRST_FOUND || criteria & CR_BIAS_HORIZONTAL) {
+ assign(alloc_area, short_listed->area.x0, short_listed->area.y0,
+ short_listed->area.x0 + w - 1, short_listed->area.y0 + h - 1);
+ return TilerErrorNone;
+ }
+
+ /* let's calculate for the first candidate, assign it as the winner, and
+ replace it with any later one that has better credentials w.r.t. the criteria */
+
+ win_candidate = short_listed;
+ get_busy_neigh_stats(w, h, &short_listed->area, &win_neigh_stats);
+ win_total_neighs = TOTAL_BOUNDARY(&win_neigh_stats) +
+ TOTAL_OCCUPIED(&win_neigh_stats);
+ get_nearness_factor(scan_area, &short_listed->area, &win_near_factor);
+ /* now check from second candidate onwards */
+ cur_candidate = short_listed->next;
+
+ while (cur_candidate != NULL) {
+
+ /* Calculating all the required statistics, though not using
+ * all of them in all Criteria, but makes simpler code
+ */
+ cur_area = &cur_candidate->area;
+ get_busy_neigh_stats(w, h, cur_area, &cur_neigh_stats);
+ get_nearness_factor(scan_area, cur_area, &cur_near_factor);
+ /* Check against the winner, if this one is better */
+ cur_total_neighs = TOTAL_BOUNDARY(&cur_neigh_stats) +
+ TOTAL_OCCUPIED(&cur_neigh_stats);
+
+
+
+ /* PREFER MAX NEIGHBOURS */
+ if (criteria & CR_MAX_NEIGHS) {
+ if (cur_total_neighs > win_total_neighs)
+ swap_flag = YES;
+ }
+
+ /* I am not checking for the condition where both
+ * INCL_LENS are same, because the logic does not find new
+ * shoulders on the same row after it finds a fit
+ */
+ if (criteria & CR_BIAS_VERTICAL) {
+ P("cur distance :%d win distance: %d\n",
+ INCL_LEN_MOD(cur_area->y0, scan_area->y0),
+ INCL_LEN_MOD(win_candidate->area.y0,
+ scan_area->y0))
+ if (INCL_LEN_MOD(cur_area->y0, scan_area->y0) >
+ INCL_LEN_MOD(win_candidate->area.y0,
+ scan_area->y0)) {
+ swap_flag = YES;
+ }
+ }
+
+
+ if (criteria & CR_DIAGONAL_BALANCE) {
+
+ /* Check against the winner, if this one is better */
+ cur_total_neighs = TOTAL_BOUNDARY(&cur_neigh_stats) +
+ TOTAL_OCCUPIED(&cur_neigh_stats);
+
+ if (win_total_neighs <= cur_total_neighs) {
+ P3("Logic: Oh win_total_neighs(%d) <=\
+ cur_total_neighs(%d)\n",
+ win_total_neighs, cur_total_neighs);
+ if (win_total_neighs < cur_total_neighs ||
+ (TOTAL_OCCUPIED(&win_neigh_stats) <
+ TOTAL_OCCUPIED(&cur_neigh_stats))) {
+ P3("Logic: Found one with more\
+ neighbours win_total_neighs:%d\
+ cur_total_neighs:%d WinOcc: %d,\
+ CurOcc: %d\n", win_total_neighs,
+ cur_total_neighs,
+ TOTAL_OCCUPIED(
+ &win_neigh_stats),
+ TOTAL_OCCUPIED(
+ &cur_neigh_stats));
+ swap_flag = YES;
+ } else if ((TOTAL_OCCUPIED(&win_neigh_stats) ==
+ TOTAL_OCCUPIED(&cur_neigh_stats))) {
+				/* Reaching here implies that
+				 * win_total_neighs == cur_total_neighs
+				 * and TOTAL_OCCUPIED(win) ==
+				 * TOTAL_OCCUPIED(cur). */
+ /*Now we check the nearness factor */
+ P3("Logic: Ah WinOcc(%d) == CurOcc:\
+ (%d), so checking Nearness factor\n",
+ TOTAL_OCCUPIED(&win_neigh_stats),
+ TOTAL_OCCUPIED(&cur_neigh_stats));
+ P3("Logic: Hmm winNF (%3f) & curNF\
+ (%3f)\n",
+ (double)(win_near_factor.nf_x
+ + win_near_factor.nf_y),
+ (double)(cur_near_factor.nf_x
+ + cur_near_factor.nf_y));
+ if ((s32)(win_near_factor.nf_x +
+ win_near_factor.nf_y) >
+ (s32)(cur_near_factor.nf_x +
+ cur_near_factor.nf_y)) {
+ P3("Logic: So, nearness factor\
+ of Cur is <\
+ than Win\n");
+ swap_flag = YES;
+ }
+
+ }
+
+ }
+
+
+ }
+
+ /* Swap the win candidate with cur-candidate with better
+ * credentials
+ */
+ if (swap_flag) {
+ win_candidate = cur_candidate;
+ win_near_factor = cur_near_factor;
+ win_neigh_stats = cur_neigh_stats;
+ win_total_neighs = cur_total_neighs;
+ swap_flag = NO;
+ }
+
+ /*Go to next candidate */
+ cur_candidate = cur_candidate->next;
+
+ }
+
+
+ assign(alloc_area, win_candidate->area.x0, win_candidate->area.y0,
+ win_candidate->area.x0+w - 1, win_candidate->area.y0 + h - 1);
+ return TilerErrorNone;
+}
+
+s32 get_nearness_factor(struct area_spec *scan_area,
+ struct area_spec *candidate, struct nearness_factor *nf)
+{
+ if (nf == NULL || scan_area == NULL || candidate == NULL) {
+ PE("NULL input found\n");
+ return TilerErrorInvalidArg;
+ }
+
+	/* For the following calculation we need not worry about the +/- sign;
+	   the relative distances take care of it. */
+ nf->nf_x = (s32)(candidate->x0 - scan_area->x0)/
+ (scan_area->x1 - scan_area->x0);
+ nf->nf_y = (s32)(candidate->y0 - scan_area->y0)/
+ (scan_area->y1 - scan_area->y0);
+ return TilerErrorNone;
+
+}
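+/*
+ * Worked example (illustrative values, assuming a full-container scan area):
+ *   scan_area = (0, 0) - (255, 127), candidate at (128, 64)
+ *   nf_x = (128 - 0) / (255 - 0) = 0   (integer division truncates)
+ *   nf_y = (64 - 0)  / (127 - 0) = 0
+ * Only a candidate on the far scan boundary, e.g. (255, 127), yields
+ * nf_x = nf_y = 1, so with the current s32 fields the factor mainly
+ * distinguishes boundary candidates from interior ones.
+ */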
+
+/*Neighbours
+
+ |<-----T------>|
+ _ _______________ _
+ L | Area | R
+ _ |______________|_
+ |<-----B------>|
+*/
+s32 get_busy_neigh_stats(u16 width, u16 height,
+struct area_spec *top_left_corner, struct neighbour_stats *neighbour_stat)
+{
+ s16 xx = 0, yy = 0;
+ struct area_spec left_edge;
+ struct area_spec right_edge;
+ struct area_spec top_edge;
+ struct area_spec bottom_edge;
+
+	if (neighbour_stat == NULL || top_left_corner == NULL) {
+ PE("Null input received\n");
+ return TilerErrorInvalidArg;
+ }
+
+ if (width == 0 || height == 0) {
+		PE("width or height is zero\n");
+ return TilerErrorInvalidArg;
+ }
+
+	/* Clearing any existing values */
+ memset(neighbour_stat, 0, sizeof(struct neighbour_stats));
+
+ /* Finding Top Edge */
+ assign(&top_edge, top_left_corner->x0, top_left_corner->y0,
+ top_left_corner->x0 + width - 1, top_left_corner->y0);
+
+ /* Finding Bottom Edge */
+ assign(&bottom_edge, top_left_corner->x0,
+ top_left_corner->y0+height - 1,
+ top_left_corner->x0 + width - 1,
+ top_left_corner->y0 + height - 1);
+
+ /* Finding Left Edge */
+ assign(&left_edge, top_left_corner->x0, top_left_corner->y0,
+ top_left_corner->x0, top_left_corner->y0 + height - 1);
+
+ /* Finding Right Edge */
+ assign(&right_edge, top_left_corner->x0 + width - 1,
+ top_left_corner->y0,
+ top_left_corner->x0 + width - 1, top_left_corner->y0 + height - 1);
+
+ /* dump_area(&top_edge);
+ dump_area(&right_edge);
+ dump_area(&bottom_edge);
+ dump_area(&left_edge);
+ */
+
+ /*Parsing through top & bottom edge*/
+ for (xx = top_edge.x0; xx <= top_edge.x1; ++xx) {
+ if ((top_edge.y0 - 1) < 0) {
+ neighbour_stat->top_boundary++;
+ } else {
+ if (g_area_container[xx][top_edge.y0 - 1].is_occupied)
+ neighbour_stat->top_occupied++;
+ }
+
+		/* The bottom edge needs the same iterations, so both edges
+		   are checked in a single pass to save work */
+ if ((bottom_edge.y0 + 1) > (MAX_Y_DIMMENSION - 1)) {
+ neighbour_stat->bottom_boundary++;
+ } else {
+ if (g_area_container[xx][bottom_edge.y0+1].is_occupied)
+ neighbour_stat->bottom_occupied++;
+ }
+
+ }
+
+	/* Parsing through the left and right edges */
+ for (yy = left_edge.y0; yy <= left_edge.y1; ++yy) {
+ if ((left_edge.x0 - 1) < 0) {
+ neighbour_stat->left_boundary++;
+ } else {
+ if (g_area_container[left_edge.x0 - 1][yy].is_occupied)
+ neighbour_stat->left_occupied++;
+ }
+
+ if ((right_edge.x0 + 1) > (MAX_X_DIMMENSION - 1)) {
+ neighbour_stat->right_boundary++;
+ } else {
+ if (g_area_container[right_edge.x0 + 1][yy].is_occupied)
+ neighbour_stat->right_occupied++;
+ }
+
+ }
+ return TilerErrorNone;
+}
+
+/* Test insert
+ * Dummy insertion, No Error Checking.
+ */
+s32 test_insert(IN struct area_spec_list *new_area)
+{
+	struct tiler_page tile;
+	struct area_spec area;
+
+	/* check the pointer before dereferencing it */
+	if (new_area == NULL)
+		return TilerErrorInvalidArg;
+
+	area = new_area->area;
+
+ tile.is_occupied = OCCUPIED;
+ tile.reserved = 0;
+ tile.parent_area.x0 = area.x0;
+ tile.parent_area.y0 = area.y0;
+ tile.parent_area.x1 = area.x1;
+ tile.parent_area.y1 = area.y1;
+ tile.type = new_area->area_type;
+
+ if (new_area->area_type == TWO_D)
+ return insert_area_with_tiler_page(&area, tile);
+
+ return insert_pages_with_tiler_page(&area, tile);
+}
+
+s32 test_dump_alloc_list(void)
+{
+ dump_list_entries(g_allocation_list);
+ return TilerErrorNone;
+}
+
+
+
+s32 test_allocate_2D_area(IN u16 w, IN u16 h, IN enum alignment align,
+ u16 corner, OUT struct area_spec *allocated_area)
+{
+
+ struct area_spec scan_area = {0, 0, 0, 0};
+ s32 ret = TilerErrorNone;
+ u16 stride = ALIGN_STRIDE(align);
+ struct tiler_page tile;
+
+ /*check if width and height are within limits */
+ if (w > MAX_X_DIMMENSION || w == 0 || h > MAX_Y_DIMMENSION || h == 0) {
+ PE("Invalid dimension:: %d x %d\n", w, h);
+ return TilerErrorInvalidDimension;
+ }
+
+ /* Checking for input arguments */
+ if (allocated_area == NULL) {
+ PE("NULL input found\n");
+ return TilerErrorInvalidArg;
+ }
+
+ if (corner == TL_CORNER) {
+ assign(&scan_area, 0, 0, (MAX_X_DIMMENSION - 1),
+ (MAX_Y_DIMMENSION - 1));
+ ret = scan_l2r_t2b(w, h, stride, &scan_area, allocated_area);
+ } else if (corner == TR_CORNER) {
+ assign(&scan_area, (MAX_X_DIMMENSION-1), 0, 0,
+ (MAX_Y_DIMMENSION - 1));
+ ret = scan_r2l_t2b(w, h, stride, &scan_area, allocated_area);
+ } else if (corner == BL_CORNER) {
+ assign(&scan_area, 0, (MAX_Y_DIMMENSION - 1),
+ (MAX_X_DIMMENSION - 1), 0);
+ ret = scan_l2r_b2t(w, h, stride, &scan_area, allocated_area);
+ } else {
+ assign(&scan_area, (MAX_X_DIMMENSION - 1),
+ (MAX_Y_DIMMENSION - 1), 0, 0);
+ ret = scan_r2l_b2t(w, h, stride, &scan_area, allocated_area);
+ }
+
+ MUTEX_LOCK(&g_mutex);
+
+ if (ret != TilerErrorNone) {
+ PE("Did not find anything in the given area\n");
+ } else {
+ P2("Yahoo found a fit: %s\n", AREA_STR(a_str, allocated_area));
+ tile.is_occupied = OCCUPIED;
+ assign(&tile.parent_area, allocated_area->x0,
+ allocated_area->y0, allocated_area->x1, allocated_area->y1);
+
+ /* some id, not useful now */
+ tile.reserved = g_id++;
+
+ /* Saying that type is 2D */
+ tile.type = TWO_D;
+
+ /* inserting into tiler container */
+ insert_area_with_tiler_page(allocated_area, tile);
+
+ /*updating the list of allocations */
+ insert_element(&g_allocation_list, allocated_area, TWO_D);
+ }
+
+ MUTEX_REL(&g_mutex);
+ return ret;
+
+}
+
+s32 test_get_busy_neigh_stats(u16 width, u16 height,
+ struct area_spec *top_left_corner,
+ struct neighbour_stats *neighbour_stat)
+{
+ return get_busy_neigh_stats(width, height, top_left_corner,
+ neighbour_stat);
+}
+
+
+s32 test_check_busy_tile(IN u16 x, u16 y)
+{
+ return (s32)g_area_container[x][y].is_occupied;
+}
+
+/**
+ * @description: Retrieves the parent area of the page at (x0, y0) if it is
+ * occupied.
+ * @input: co-ordinates of the page (x0, y0) whose parent area is required.
+ * @return 0 on success, non-0 error value on failure. On success,
+ * parent_area will contain the co-ordinates (TL & BR corners) of the
+ * parent area.
+ */
+s32 retrieve_parent_area(u16 x0, u16 y0, struct area_spec *parent_area)
+{
+ if (parent_area == NULL) {
+ PE("NULL input found\n");
+ return TilerErrorInvalidArg;
+ }
+
+	/* x0 and y0 are unsigned, so no lower-bound check is needed */
+	if (x0 >= MAX_X_DIMMENSION || y0 >= MAX_Y_DIMMENSION) {
+ PE("Invalid dimensions\n");
+ return TilerErrorInvalidDimension;
+ }
+
+ MUTEX_LOCK(&g_mutex);
+
+ assign(parent_area, 0, 0, 0, 0);
+
+ if (g_area_container[x0][y0].is_occupied) {
+ parent_area->x0 = g_area_container[x0][y0].parent_area.x0;
+ parent_area->y0 = g_area_container[x0][y0].parent_area.y0;
+ parent_area->x1 = g_area_container[x0][y0].parent_area.x1;
+ parent_area->y1 = g_area_container[x0][y0].parent_area.y1;
+ }
+
+ MUTEX_REL(&g_mutex);
+ return TilerErrorNone;
+}
+
diff --git a/drivers/media/video/tiler/tcm/tcm.h b/drivers/media/video/tiler/tcm/tcm.h
new file mode 100644
index 000000000000..c01799722fa2
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/tcm.h
@@ -0,0 +1,103 @@
+/*
+ * tcm.h
+ *
+ * Author: Ravi Ramachandra <r.ramachandra@ti.com>
+ *
+ * Tiler Container manager functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TCM_H_
+#define _TCM_H_
+
+#include "tcm_pri.h"
+#include "tcm_utils.h"
+
+#define OCCUPIED YES
+#define NOT_OCCUPIED NO
+
+#define FIT YES
+#define NO_FIT NO
+
+
+/*Provide inclusive length between co-ordinates */
+#define INCL_LEN(high, low) (high - low + 1)
+
+#define INCL_LEN_MOD(start, end) ((start > end) ? (start-end + 1) : \
+(end - start + 1))
+
+#define TOTAL_BOUNDARY(stat) ((stat)->top_boundary + (stat)->bottom_boundary + \
+ (stat)->left_boundary + (stat)->right_boundary)
+#define TOTAL_OCCUPIED(stat) ((stat)->top_occupied + (stat)->bottom_occupied + \
+ (stat)->left_occupied + (stat)->right_occupied)
+
+
+#define TILER_USER_SPACE
+
+#ifdef TILER_USER_SPACE
+#define MUTEX_LOCK(m) (mutex_lock(m))
+#define MUTEX_REL(m) (mutex_unlock(m))
+#endif
+
+enum Criteria {
+ CR_MAX_NEIGHS = 0x01,
+ CR_FIRST_FOUND = 0x10,
+ CR_BIAS_HORIZONTAL = 0x20,
+ CR_BIAS_VERTICAL = 0x40,
+ CR_DIAGONAL_BALANCE = 0x80
+};
+
+struct nearness_factor {
+ s32 nf_x;
+ s32 nf_y;
+};
+
+
+
+struct tiler_page {
+ u8 is_occupied; /* is tiler_page Occupied */
+ /* Parent area to which this tiler_page belongs */
+ struct area_spec parent_area;
+ u32 type; /* 1D or 2D */
+ u32 reserved;
+};
+
+
+/* @description: init_tiler
+ * Initializes the tiler container.
+ */
+s32 init_tiler(void);
+
+s32 deinit_tiler(void);
+
+s32 allocate_2d_area(IN u16 w, IN u16 h, IN enum alignment align,
+ OUT struct area_spec *allocated_area);
+
+s32 allocate_1d_pages(IN u32 num_of_pages,
+ OUT struct area_spec *allocated_pages);
+
+s32 deallocate(IN struct area_spec *to_be_removed_area);
+
+s32 test_dump_alloc_list(void);
+
+s32 test_allocate_2D_area(IN u16 w, IN u16 h, IN enum alignment align,
+ u16 corner, OUT struct area_spec *allocated_area);
+
+s32 test_get_busy_neigh_stats(u16 width, u16 height,
+ struct area_spec *top_left_corner,
+ struct neighbour_stats *neighbour_stat);
+
+s32 test_check_busy_tile(IN u16 x, u16 y);
+
+s32 retrieve_parent_area(u16 x0, u16 y0, struct area_spec *parent_area);
+
+#endif
diff --git a/drivers/media/video/tiler/tcm/tcm_dbg.h b/drivers/media/video/tiler/tcm/tcm_dbg.h
new file mode 100644
index 000000000000..072c18d66124
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/tcm_dbg.h
@@ -0,0 +1,61 @@
+/*
+ * tcm_dbg.h
+ *
+ * Debug Utility definitions.
+ *
+ * Copyright (C) 2008-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#ifndef _TILER_DEBUG_
+#define _TILER_DEBUG_
+
+#undef DEBUG
+/* #define DEBUG_ENTRY_EXIT */
+
+#undef DEBUG_ALL
+
+
+#ifdef DEBUG
+#define P(fmt, args...) printk(KERN_NOTICE "%s()::[%d]\n", __func__, __LINE__);
+
+#else
+#define P(fmt, args...)
+#endif
+
+#define PE(fmt, args...) printk(KERN_ERR "%s()::[%d]\n", __func__, __LINE__);
+
+#ifdef DEBUG_ALL
+#define L1
+#define L2
+#define L3
+#endif
+
+
+#ifdef L1
+#define P1 P
+#else
+#define P1(args...)
+#endif
+
+#ifdef L2
+#define P2 P
+#else
+#define P2(args...)
+#endif
+
+#ifdef L3
+#define P3 P
+#else
+#define P3(args...)
+#endif
+#endif
+
diff --git a/drivers/media/video/tiler/tcm/tcm_pri.h b/drivers/media/video/tiler/tcm/tcm_pri.h
new file mode 100644
index 000000000000..6f4a33c6801b
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/tcm_pri.h
@@ -0,0 +1,87 @@
+#ifndef _TCM_PRIVATE_H_
+#define _TCM_PRIVATE_H_
+
+#define IN
+#define OUT
+#define INOUT
+
+#define MAX_X_DIMMENSION 256
+#define MAX_Y_DIMMENSION 128
+
+#define YES 1
+#define NO 0
+
+#define TL_CORNER 0
+#define TR_CORNER 1
+#define BL_CORNER 3
+#define BR_CORNER 4
+
+
+/*Note Alignment 64 gets the highest priority */
+#define ALIGN_STRIDE(align)((align == ALIGN_64) ? 64 : ((align == ALIGN_32) ? \
+ 32 : ((align == ALIGN_16) ? 16 : 1)))
+
+enum tiler_error {
+ TilerErrorNone = 0,
+ TilerErrorGeneral = -1,
+ TilerErrorInvalidDimension = -2,
+ TilerErrorNoRoom = -3,
+ TilerErrorInvalidArg = -4,
+ TilerErrorMatchNotFound = -5,
+ TilerErrorOverFlow = -6,
+ TilerErrorInvalidScanArea = -7,
+ TilerErrorNotSupported = -8,
+};
+
+enum alignment {
+ ALIGN_NONE = 0,
+ /*ALIGN_16 = 0x1,*/
+ ALIGN_32 = 0x1, /*0x2 */
+ ALIGN_64 = 0x2, /*0x4 */
+ ALIGN_16 = 0x3 /* 0x1*/
+};
+
+enum dim_type {
+ ONE_D = 1,
+ TWO_D = 2
+};
+
+
+struct area_spec {
+ u16 x0;
+ u16 y0;
+ u16 x1;
+ u16 y1;
+};
+
+
+struct area_spec_list;
+
+struct area_spec_list {
+ struct area_spec area;
+ u16 area_type;
+ struct area_spec_list *next;
+};
+
+
+/* Everything is a rectangle with four sides, and on each side there can be
+ * either the container boundary or another tile. That tile may be occupied
+ * or not. This information is stored below.
+ */
+struct neighbour_stats {
+ /* num of tiles on left touching the boundary */
+ u16 left_boundary;
+ /* num of tiles on left that are occupied */
+ u16 left_occupied;
+
+ u16 top_boundary;
+ u16 top_occupied;
+
+ u16 right_boundary;
+ u16 right_occupied;
+
+ u16 bottom_boundary;
+ u16 bottom_occupied;
+};
+
+#endif
diff --git a/drivers/media/video/tiler/tcm/tcm_utils.c b/drivers/media/video/tiler/tcm/tcm_utils.c
new file mode 100644
index 000000000000..b3980d861324
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/tcm_utils.c
@@ -0,0 +1,261 @@
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "tcm_utils.h"
+#include "tcm_dbg.h"
+
+/*
+* Assignment Utility Function
+*/
+void assign(IN struct area_spec *a, IN u16 x0, IN u16 y0, IN u16 x1, IN u16 y1)
+{
+ a->x0 = x0;
+ a->x1 = x1;
+ a->y1 = y1;
+ a->y0 = y0;
+}
+
+void dump_area(struct area_spec *area)
+{
+ printk(KERN_NOTICE "(%d %d) - (%d %d)\n", area->x0, area->y0, area->x1,
+ area->y1);
+}
+
+
+/*
+ * Inserts a given area at the end of a given list
+ */
+s32 insert_element(INOUT struct area_spec_list **list,
+ IN struct area_spec *newArea, IN u16 area_type)
+{
+ struct area_spec_list *list_iter = *list;
+ struct area_spec_list *new_elem = NULL;
+ if (list_iter == NULL) {
+		list_iter = kmalloc(sizeof(struct area_spec_list), GFP_KERNEL);
+		if (!list_iter)
+			return TilerErrorGeneral;
+		/* P("Created new List: 0x%x\n",list_iter); */
+ assign(&list_iter->area, newArea->x0, newArea->y0,
+ newArea->x1, newArea->y1);
+ list_iter->area_type = area_type;
+ list_iter->next = NULL;
+ *list = list_iter;
+ return TilerErrorNone;
+ }
+
+ /* move till you find the last element */
+ while (list_iter->next != NULL)
+ list_iter = list_iter->next;
+
+ /* now we are the last one */
+ /* P("Adding to the end of list\n"); */
+	new_elem = kmalloc(sizeof(struct area_spec_list), GFP_KERNEL);
+	if (!new_elem)
+		return TilerErrorGeneral;
+ assign(&new_elem->area, newArea->x0, newArea->y0, newArea->x1,
+ newArea->y1);
+ new_elem->area_type = area_type;
+ new_elem->next = NULL;
+ list_iter->next = new_elem;
+ return TilerErrorNone;
+}
+
+s32 rem_element_with_match(struct area_spec_list **listHead,
+ struct area_spec *to_be_removed, u16 *area_type)
+{
+ struct area_spec_list *temp_list = NULL;
+ struct area_spec_list *matched_elem = NULL;
+ struct area_spec *cur_area = NULL;
+ u8 found_flag = NO;
+
+	/* If the area to be removed matches the list head itself,
+	   the next element becomes the new list head */
+ if (*listHead != NULL) {
+ cur_area = &(*listHead)->area;
+ if (cur_area->x0 == to_be_removed->x0 && cur_area->y0 ==
+ to_be_removed->y0 && cur_area->x1 ==
+ to_be_removed->x1 && cur_area->y1 ==
+ to_be_removed->y1) {
+ *area_type = (*listHead)->area_type;
+ P1("Match found, Now Removing Area : %s\n",
+ AREA_STR(a_str, cur_area));
+
+ temp_list = (*listHead)->next;
+ kfree(*listHead);
+ *listHead = temp_list;
+ return TilerErrorNone;
+ }
+ }
+
+ temp_list = *listHead;
+ while (temp_list != NULL) {
+		/* the list head has already been checked above,
+		   so start from the second element */
+ if (temp_list->next != NULL) {
+ cur_area = &temp_list->next->area;
+ if (cur_area->x0 == to_be_removed->x0 && cur_area->y0 ==
+ to_be_removed->y0 && cur_area->x1 ==
+ to_be_removed->x1 && cur_area->y1 ==
+ to_be_removed->y1) {
+ P1("Match found, Now Removing Area : %s\n",
+ AREA_STR(a_str, cur_area));
+ matched_elem = temp_list->next;
+ *area_type = matched_elem->area_type;
+ temp_list->next = matched_elem->next;
+ kfree(matched_elem);
+ matched_elem = NULL;
+ found_flag = YES;
+ break;
+ }
+ }
+ temp_list = temp_list->next;
+ }
+
+ if (found_flag)
+ return TilerErrorNone;
+
+ PE("Match Not found :%s\n", AREA_STR(a_str, to_be_removed));
+
+ return TilerErrorMatchNotFound;
+}
+
+s32 clean_list(struct area_spec_list **list)
+{
+ struct area_spec_list *temp_list = NULL;
+ struct area_spec_list *to_be_rem_elem = NULL;
+
+ if (*list != NULL) {
+ temp_list = (*list)->next;
+ while (temp_list != NULL) {
+ /*P("Freeing :");
+ dump_area(&temp_list->area);*/
+ to_be_rem_elem = temp_list->next;
+ kfree(temp_list);
+ temp_list = to_be_rem_elem;
+ }
+
+ /* freeing the head now */
+ kfree(*list);
+ *list = NULL;
+ }
+
+ return TilerErrorNone;
+}
+
+s32 dump_list_entries(IN struct area_spec_list *list)
+{
+ struct area_spec_list *list_iter = NULL;
+ char a_str[32] = {'\0'};
+ P("Printing List Entries:\n");
+
+ if (list == NULL) {
+ PE("NULL List found\n");
+ return TilerErrorInvalidArg;
+ }
+
+ /*Now if we have a valid list, let us print the values */
+ list_iter = list;
+ do {
+ printk(KERN_NOTICE "%dD:%s\n", list_iter->area_type,
+ AREA_STR(a_str, &list_iter->area));
+ /* dump_area(&list_iter->area); */
+ list_iter = list_iter->next;
+ } while (list_iter != NULL);
+
+ return TilerErrorNone;
+}
+
+
+
+s32 dump_neigh_stats(struct neighbour_stats *neighbour)
+{
+ P("Top Occ:Boundary %d:%d\n", neighbour->top_occupied,
+ neighbour->top_boundary);
+ P("Bot Occ:Boundary %d:%d\n", neighbour->bottom_occupied,
+ neighbour->bottom_boundary);
+ P("Left Occ:Boundary %d:%d\n", neighbour->left_occupied,
+ neighbour->left_boundary);
+ P("Rigt Occ:Boundary %d:%d\n", neighbour->right_occupied,
+ neighbour->right_boundary);
+ return TilerErrorNone;
+}
+
+s32 move_left(u16 x, u16 y, u32 num_of_pages, u16 *xx, u16 *yy)
+{
+ /* Num of Pages remaining to the left of the same ROW. */
+ u16 num_of_pages_left = x;
+ u16 remain_pages = 0;
+
+	/* This function needs to be fast, so only minimal
+	   argument checking is done */
+	if (x >= MAX_X_DIMMENSION || y >= MAX_Y_DIMMENSION || xx == NULL ||
+ yy == NULL) {
+ PE("Error in input arguments\n");
+ return TilerErrorInvalidArg;
+ }
+
+ /*Checking if we are going to go out of bound with the given
+ num_of_pages */
+ if (num_of_pages > x + (y * MAX_X_DIMMENSION)) {
+ PE("Overflows off the top left corner, can go at the Max (%d)\
+ to left from (%d, %d)\n", (x + y * MAX_X_DIMMENSION),
+ x, y);
+ return TilerErrorOverFlow;
+ }
+
+ if (num_of_pages > num_of_pages_left) {
+		/* use up the pages left on this row and wrap the rest */
+ remain_pages = num_of_pages - num_of_pages_left;
+
+ if (0 == remain_pages % MAX_X_DIMMENSION) {
+ *xx = 0;
+ *yy = y - remain_pages / MAX_X_DIMMENSION;
+ } else {
+ *xx = MAX_X_DIMMENSION - remain_pages %
+ MAX_X_DIMMENSION;
+ *yy = y - (1 + remain_pages / MAX_X_DIMMENSION);
+ }
+ } else {
+ *xx = x - num_of_pages;
+ *yy = y;
+ }
+
+ return TilerErrorNone;
+}
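+/*
+ * Worked example for move_left() above (illustrative, with
+ * MAX_X_DIMMENSION = 256): move_left(10, 2, 4, &xx, &yy) stays on the same
+ * row and yields (6, 2), while move_left(10, 2, 20, &xx, &yy) consumes the
+ * 10 pages on row 2 and wraps 10 more onto the row above, yielding (246, 1).
+ */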
+
+s32 move_right(u16 x, u16 y, u32 num_of_pages, u16 *xx, u16 *yy)
+{
+ u32 avail_pages = (MAX_X_DIMMENSION - x - 1) +
+ (MAX_Y_DIMMENSION - y - 1) * MAX_X_DIMMENSION;
+ /* Num of Pages remaining to the Right of the same ROW. */
+ u16 num_of_pages_right = MAX_X_DIMMENSION - 1 - x;
+ u16 remain_pages = 0;
+
+	if (x >= MAX_X_DIMMENSION || y >= MAX_Y_DIMMENSION || xx == NULL ||
+ yy == NULL) {
+ PE("Error in input arguments");
+ return TilerErrorInvalidArg;
+ }
+
+ /*Checking if we are going to go out of bound with the given
+ num_of_pages */
+ if (num_of_pages > avail_pages) {
+		PE("Overflows off the bottom right corner, can go at most (%d)\
+			to the right from (%d, %d)\n", avail_pages, x, y);
+ return TilerErrorOverFlow;
+ }
+
+ if (num_of_pages > num_of_pages_right) {
+ remain_pages = num_of_pages - num_of_pages_right;
+
+ if (0 == remain_pages % MAX_X_DIMMENSION) {
+ *xx = MAX_X_DIMMENSION - 1;
+ *yy = y + remain_pages / MAX_X_DIMMENSION;
+ } else {
+ *xx = remain_pages % MAX_X_DIMMENSION - 1;
+ *yy = y + (1 + remain_pages / MAX_X_DIMMENSION);
+ }
+ } else {
+ *xx = x + num_of_pages;
+ *yy = y;
+ }
+
+ return TilerErrorNone;
+}
diff --git a/drivers/media/video/tiler/tcm/tcm_utils.h b/drivers/media/video/tiler/tcm/tcm_utils.h
new file mode 100644
index 000000000000..ac6a02d2cb43
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/tcm_utils.h
@@ -0,0 +1,29 @@
+#ifndef _TILER_UTILS_H
+#define _TILER_UTILS_H
+
+#include "tcm_pri.h"
+#include "tcm_dbg.h"
+
+#define AREA_STR(a_str, area) ({ \
+ sprintf(a_str, "(%03d %03d)-(%03d %03d)", (area)->x0, (area)->y0, \
+ (area)->x1, (area)->y1); a_str; })
+
+void assign(struct area_spec *a, u16 x0, u16 y0, u16 x1, u16 y1);
+void dump_area(struct area_spec *area);
+
+s32 insert_element(INOUT struct area_spec_list **list,
+ IN struct area_spec *newArea, IN u16 area_type);
+
+s32 dump_list_entries(IN struct area_spec_list *list);
+
+s32 dump_neigh_stats(struct neighbour_stats *neighbour);
+
+s32 rem_element_with_match(struct area_spec_list **list,
+ struct area_spec *to_be_removed, u16 *area_type);
+
+s32 clean_list(struct area_spec_list **list);
+
+s32 move_left(u16 x, u16 y, u32 num_of_pages, u16 *xx, u16 *yy);
+s32 move_right(u16 x, u16 y, u32 num_of_pages, u16 *xx, u16 *yy);
+
+#endif
diff --git a/drivers/media/video/tiler/tiler.c b/drivers/media/video/tiler/tiler.c
new file mode 100644
index 000000000000..13841678bf5b
--- /dev/null
+++ b/drivers/media/video/tiler/tiler.c
@@ -0,0 +1,885 @@
+/*
+ * tiler.c
+ *
+ * TILER driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h> /* struct cdev */
+#include <linux/kdev_t.h> /* MKDEV() */
+#include <linux/fs.h> /* register_chrdev_region() */
+#include <linux/device.h> /* struct class */
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/err.h> /* IS_ERR() */
+#include <linux/uaccess.h> /* copy_to_user */
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h> /* page_cache_release() */
+
+#include "tiler.h"
+#include "tiler_def.h"
+#include "../dmm/dmm.h"
+#include "tcm/tcm.h"
+
+struct tiler_dev {
+ struct cdev cdev;
+};
+
+struct platform_driver tiler_driver_ldm = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tiler",
+ },
+ .probe = NULL,
+ .shutdown = NULL,
+ .remove = NULL,
+};
+
+struct __buf_info {
+ struct list_head list;
+ struct tiler_buf_info buf_info;
+};
+
+struct mem_info {
+ struct list_head list;
+ u32 sys_addr; /* system space (L3) tiler addr */
+ u32 *page; /* virt addr to page-list */
+ dma_addr_t page_pa; /* phys addr to page-list */
+ u32 size; /* size of page-list */
+ u32 num_pg; /* number of pages in page-list */
+ s8 mapped; /* flag to indicate user mapped mem */
+ u32 usr; /* user space address */
+ u32 *pg_ptr; /* list of struct page pointers */
+ struct area_spec area; /* return struct from tiler area mgr */
+ u32 *mem; /* pointer to list of phys addresses */
+};
+
+static s32 tiler_major;
+static s32 tiler_minor;
+static struct tiler_dev *tiler_device;
+static struct class *tilerdev_class;
+static u32 id;
+static struct __buf_info buf_list;
+static struct mem_info mem_list;
+static struct mutex mtx;
+
+static s32 __set_area(enum tiler_fmt fmt, u32 width, u32 height, u8 *x_area,
+ u8 *y_area)
+{
+ s32 x_pagedim = 0, y_pagedim = 0;
+ u16 tiled_pages_per_ss_page = 0;
+
+ switch (fmt) {
+ case TILFMT_8BIT:
+ x_pagedim = DMM_PAGE_DIMM_X_MODE_8;
+ y_pagedim = DMM_PAGE_DIMM_Y_MODE_8;
+ tiled_pages_per_ss_page = TILER_PAGE / x_pagedim;
+ break;
+ case TILFMT_16BIT:
+ x_pagedim = DMM_PAGE_DIMM_X_MODE_16;
+ y_pagedim = DMM_PAGE_DIMM_Y_MODE_16;
+ tiled_pages_per_ss_page = TILER_PAGE / x_pagedim / 2;
+ break;
+ case TILFMT_32BIT:
+ x_pagedim = DMM_PAGE_DIMM_X_MODE_32;
+ y_pagedim = DMM_PAGE_DIMM_Y_MODE_32;
+ tiled_pages_per_ss_page = TILER_PAGE / x_pagedim / 4;
+ break;
+ case TILFMT_PAGE:
+ x_pagedim = DMM_PAGE_DIMM_X_MODE_8;
+ y_pagedim = DMM_PAGE_DIMM_Y_MODE_8;
+ width = ((width + TILER_PAGE - 1)/TILER_PAGE);
+ tiled_pages_per_ss_page = 1;
+
+ /*
+ * For 1D blocks larger than the container width,
+ * we need to allocate multiple rows.
+ */
+ if (width > TILER_WIDTH) {
+ height = (width + TILER_WIDTH - 1) / TILER_WIDTH;
+ width = TILER_WIDTH;
+ } else {
+ height = 1;
+ }
+
+ height *= x_pagedim;
+ width *= y_pagedim;
+ break;
+ default:
+ return -1;
+ break;
+ }
+
+ *x_area = (u8)((width + x_pagedim - 1) / x_pagedim - 1);
+ *y_area = (u8)((height + y_pagedim - 1) / y_pagedim - 1);
+
+ tiled_pages_per_ss_page = 64;
+ *x_area = (u8)(((*x_area + tiled_pages_per_ss_page) &
+ ~(tiled_pages_per_ss_page - 1)) - 1);
+
+ if (*x_area > TILER_WIDTH || *y_area > TILER_HEIGHT)
+ return -1;
+ return 0x0;
+}
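+/*
+ * Worked example for the TILFMT_PAGE path in __set_area() (illustrative,
+ * using the DMM_PAGE_DIMM_*_MODE_8 value of 64 and a 4 KiB TILER_PAGE):
+ * a 64 KiB 1D buffer gives width = 16 pages, height = 1 row, then
+ * width *= 64 -> 1024 and height *= 64 -> 64, so
+ *   *x_area = (1024 + 63) / 64 - 1 = 15,  *y_area = (64 + 63) / 64 - 1 = 0,
+ * and after rounding x up to a 64-slot group, *x_area = 63. Even a 16-page
+ * buffer therefore reserves a full 64-slot group in the container.
+ */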
+
+static s32 get_area(u32 sys_addr, u32 *x_area, u32 *y_area)
+{
+ enum tiler_fmt fmt;
+
+ sys_addr &= TILER_ALIAS_VIEW_CLEAR;
+ fmt = TILER_GET_ACC_MODE(sys_addr);
+
+ switch (fmt + 1) {
+ case TILFMT_8BIT:
+ *x_area = DMM_HOR_X_PAGE_COOR_GET_8(sys_addr);
+ *y_area = DMM_HOR_Y_PAGE_COOR_GET_8(sys_addr);
+ break;
+ case TILFMT_16BIT:
+ *x_area = DMM_HOR_X_PAGE_COOR_GET_16(sys_addr);
+ *y_area = DMM_HOR_Y_PAGE_COOR_GET_16(sys_addr);
+ break;
+ case TILFMT_32BIT:
+ *x_area = DMM_HOR_X_PAGE_COOR_GET_32(sys_addr);
+ *y_area = DMM_HOR_Y_PAGE_COOR_GET_32(sys_addr);
+ break;
+ case TILFMT_PAGE:
+ *x_area = (sys_addr & 0x7FFFFFF) >> 12;
+ *y_area = *x_area / 256;
+ *x_area &= 255;
+ break;
+ default:
+ return -EFAULT;
+ }
+ return 0x0;
+}
+
+static u32 __get_alias_addr(enum tiler_fmt fmt, u16 x, u16 y)
+{
+ u32 acc_mode = -1;
+ u32 x_shft = -1, y_shft = -1;
+
+ switch (fmt) {
+ case TILFMT_8BIT:
+ acc_mode = 0; x_shft = 6; y_shft = 20;
+ break;
+ case TILFMT_16BIT:
+ acc_mode = 1; x_shft = 7; y_shft = 20;
+ break;
+ case TILFMT_32BIT:
+ acc_mode = 2; x_shft = 7; y_shft = 20;
+ break;
+ case TILFMT_PAGE:
+ acc_mode = 3; y_shft = 8;
+ break;
+ default:
+ return 0;
+ break;
+ }
+
+ if (fmt == TILFMT_PAGE)
+ return (u32)TIL_ALIAS_ADDR((x | y << y_shft) << 12, acc_mode);
+ else
+ return (u32)TIL_ALIAS_ADDR(x << x_shft | y << y_shft, acc_mode);
+}
+
+
+static s32 tiler_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct __buf_info *_b = NULL, *iter = NULL;
+ struct tiler_buf_info *b = NULL;
+ s32 i = 0, j = 0, k = 0, m = 0, p = 0, bpp = 1;
+ struct list_head *pos = NULL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ mutex_lock(&mtx);
+	list_for_each(pos, &buf_list.list) {
+		iter = list_entry(pos, struct __buf_info, list);
+		if ((vma->vm_pgoff << PAGE_SHIFT) == iter->buf_info.offset) {
+			_b = iter;
+			break;
+		}
+	}
+ mutex_unlock(&mtx);
+ if (!_b)
+ return -EFAULT;
+
+ b = &_b->buf_info;
+
+ for (i = 0; i < b->num_blocks; i++) {
+ if (b->blocks[i].fmt >= TILFMT_8BIT &&
+ b->blocks[i].fmt <= TILFMT_32BIT) {
+ /* get line width */
+ bpp = (b->blocks[i].fmt == TILFMT_8BIT ? 1 :
+ b->blocks[i].fmt == TILFMT_16BIT ? 2 : 4);
+ p = (b->blocks[i].dim.area.width * bpp +
+ TILER_PAGE - 1) & ~(TILER_PAGE - 1);
+
+ for (j = 0; j < b->blocks[i].dim.area.height; j++) {
+ /* map each page of the line */
+ vma->vm_pgoff =
+ (b->blocks[i].ssptr + m) >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, vma->vm_start + k,
+ (b->blocks[i].ssptr + m) >> PAGE_SHIFT,
+ p, vma->vm_page_prot))
+ return -EAGAIN;
+ k += p;
+ if (b->blocks[i].fmt == TILFMT_8BIT)
+ m += 64*TILER_WIDTH;
+ else
+ m += 2*64*TILER_WIDTH;
+ }
+ m = 0;
+ } else if (b->blocks[i].fmt == TILFMT_PAGE) {
+ vma->vm_pgoff = (b->blocks[i].ssptr) >> PAGE_SHIFT;
+ p = (b->blocks[i].dim.len + TILER_PAGE - 1) &
+ ~(TILER_PAGE - 1);
+ if (remap_pfn_range(vma, vma->vm_start + k,
+ (b->blocks[i].ssptr) >> PAGE_SHIFT, p,
+ vma->vm_page_prot))
+				return -EAGAIN;
+ k += p;
+ }
+ }
+ return 0;
+}
+
+static s32 map_buffer(enum tiler_fmt fmt, u32 width, u32 height, u32 *sys_addr,
+ u32 usr_addr)
+{
+ u16 num_page = 0, x_page = 0, y_page = 0;
+ u32 i = 0, tmp = -1;
+ u8 x_area = 0, y_area = 0, write = 0;
+ struct pat pat_desc = {0};
+ struct mem_info *mi = NULL;
+ struct page *page = NULL;
+ struct task_struct *curr_task = current;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma = NULL;
+
+ /* we only support mapping a user buffer in page mode */
+ if (fmt != TILFMT_PAGE)
+ return -EFAULT;
+
+ /* reserve area in tiler container */
+ if (__set_area(fmt, width, height, &x_area, &y_area))
+ return -EFAULT;
+
+ mi = kmalloc(sizeof(struct mem_info), GFP_KERNEL);
+ if (!mi)
+ return -ENOMEM;
+ memset(mi, 0x0, sizeof(struct mem_info));
+
+ if (allocate_1d_pages(ROUND_UP((x_area + 1) * (y_area + 1), 256),
+			&mi->area)) {
+		kfree(mi);
+		return -ENOMEM;
+	}
+
+ /* formulate system space address */
+ *sys_addr = __get_alias_addr(fmt, mi->area.x0, mi->area.y0);
+
+ /* allocate pages */
+ /* TODO: page count should be u8 @ 256 max */
+ /* TODO: num_pages should be u16 @ 32k max */
+ x_page = mi->area.x1 - mi->area.x0 + 1;
+ y_page = mi->area.y1 - mi->area.y0 + 1;
+ num_page = x_page * y_page;
+
+ mi->sys_addr = *sys_addr;
+ mi->usr = usr_addr;
+
+ /*
+ * Important Note: usr_addr is mapped from user
+ * application process to current process - it must lie
+ * completely within the current virtual memory address
+ * space in order to be of use to us here.
+ */
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, mi->usr);
+
+ /*
+ * It is observed that under some circumstances, the user
+ * buffer is spread across several vmas, so loop through
+ * and check if the entire user buffer is covered.
+ */
+
+ while ((vma) && (mi->usr + width > vma->vm_end)) {
+ /* jump to the next VMA region */
+ vma = find_vma(mm, vma->vm_end + 1);
+ }
+ if (!vma) {
+		printk(KERN_ERR "Failed to get the vma region for "
+				"user buffer.\n");
+ up_read(&mm->mmap_sem);
+ kfree(mi);
+ return -EFAULT;
+ }
+
+ mi->num_pg = width / TILER_PAGE;
+ tmp = ROUND_UP(mi->num_pg, 256); /* (mi->num_pg + 63) & ~63; */
+
+ /*
+ * Check that the number of user pages match what
+ * the container manager calculates.
+ */
+	if (num_page != tmp) {
+		up_read(&mm->mmap_sem);
+		goto free;
+	}
+ mi->size = tmp * 4;
+
+ mi->page = dma_alloc_coherent(NULL, mi->size, &mi->page_pa, GFP_ATOMIC);
+ if (!mi->page) {
+ up_read(&mm->mmap_sem);
+ goto free;
+ }
+ memset(mi->page, 0x0, mi->size);
+
+ mi->pg_ptr = kmalloc(mi->size, GFP_KERNEL);
+ if (!mi->pg_ptr) {
+ up_read(&mm->mmap_sem);
+ goto free;
+ }
+ memset(mi->pg_ptr, 0x0, mi->size);
+
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ write = 1;
+
+ tmp = mi->usr;
+ for (i = 0; i < mi->num_pg; i++) {
+ if (get_user_pages(curr_task, mm, tmp, 1, write, 1, &page,
+ NULL)) {
+ if (page_count(page) < 1) {
+				printk(KERN_ERR "Bad page count from "
+						"get_user_pages()\n");
+ }
+ mi->pg_ptr[i] = (u32)page;
+ mi->page[i] = page_to_phys(page);
+ tmp += TILER_PAGE;
+ } else {
+ printk(KERN_ERR "get_user_pages() failed\n");
+ up_read(&mm->mmap_sem);
+ goto free;
+ }
+ }
+ up_read(&mm->mmap_sem);
+
+ mi->mapped = 1;
+ mutex_lock(&mtx);
+ list_add(&mi->list, &mem_list.list);
+ mutex_unlock(&mtx);
+
+ /* send pat descriptor to dmm driver */
+ pat_desc.area.x0 = mi->area.x0;
+ pat_desc.area.y0 = mi->area.y0;
+ pat_desc.area.x1 = mi->area.x1;
+ pat_desc.area.y1 = mi->area.y1;
+ pat_desc.ctrl.dir = 0;
+ pat_desc.ctrl.ini = 0;
+ pat_desc.ctrl.lut_id = 0;
+ pat_desc.ctrl.start = 1;
+ pat_desc.ctrl.sync = 0;
+ pat_desc.next = NULL;
+
+ /* must be a 16-byte aligned physical address */
+ pat_desc.data = mi->page_pa;
+
+ if (dmm_pat_refill(&pat_desc, MANUAL))
+ goto release;
+ return 0;
+release:
+ for (i = 0; i < mi->num_pg; i++) {
+ page = (struct page *)mi->pg_ptr[i];
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ page_cache_release(page);
+ }
+free:
+ kfree(mi->pg_ptr);
+ if (mi->page && mi->page_pa)
+ dma_free_coherent(NULL, mi->size, mi->page, mi->page_pa);
+ kfree(mi);
+ return -ENOMEM;
+}
+
+s32 tiler_free(u32 sys_addr)
+{
+ u32 i = 0;
+ struct list_head *pos = NULL, *q = NULL;
+ struct mem_info *mi = NULL;
+ struct page *page = NULL;
+
+ mutex_lock(&mtx);
+ list_for_each_safe(pos, q, &mem_list.list) {
+ mi = list_entry(pos, struct mem_info, list);
+ if (mi->sys_addr == sys_addr) {
+ if (deallocate(&mi->area))
+				printk(KERN_NOTICE "warning: failed to "
+						"unreserve tiler area.\n");
+ if (!mi->mapped) {
+ dmm_free_pages(mi->mem);
+ } else {
+ for (i = 0; i < mi->num_pg; i++) {
+ page = (struct page *)mi->pg_ptr[i];
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ page_cache_release(page);
+ }
+ kfree(mi->pg_ptr);
+ }
+ dma_free_coherent(NULL, mi->size, mi->page,
+ mi->page_pa);
+ list_del(pos);
+ kfree(mi);
+ }
+ }
+ mutex_unlock(&mtx);
+ if (!mi)
+ return -EFAULT;
+
+ /* for debugging, we can set the PAT entries to DMM_LISA_MAP__0 */
+ return 0x0;
+}
+EXPORT_SYMBOL(tiler_free);
+
+/* :TODO: Currently we do not track enough information from alloc to get back
+ the actual width and height of the container, so we must make a guess. We
+ do not even have enough information to get the virtual stride of the buffer,
+ which is the real reason for this ioctl */
+s32 tiler_find_buf(u32 sys_addr, struct tiler_block_info *blk)
+{
+ struct area_spec area = {0};
+ u32 x_area = -1, y_area = -1;
+ u16 x_page = 0, y_page = 0;
+ s32 mode = -1;
+
+ if (get_area(sys_addr & 0x0FFFF1000, &x_area, &y_area))
+ return -EFAULT;
+
+ if (retrieve_parent_area(x_area, y_area, &area))
+ return -EFAULT;
+
+ x_page = area.x1 - area.x0 + 1;
+ y_page = area.y1 - area.y0 + 1;
+
+ blk->ptr = NULL;
+
+ mode = TILER_GET_ACC_MODE(sys_addr);
+ blk->fmt = (mode + 1);
+ if (blk->fmt == TILFMT_PAGE) {
+ blk->dim.len = x_page * y_page * TILER_PAGE;
+ if (blk->dim.len == 0)
+ goto error;
+ blk->stride = 0;
+ blk->ssptr = (u32)TIL_ALIAS_ADDR(((area.x0 |
+ (area.y0 << 8)) << 12), mode);
+ } else {
+ blk->stride = blk->dim.area.width = x_page * TILER_BLOCK_WIDTH;
+ blk->dim.area.height = y_page * TILER_BLOCK_HEIGHT;
+ if (blk->dim.area.width == 0 || blk->dim.area.height == 0)
+ goto error;
+ if (blk->fmt == TILFMT_8BIT) {
+ blk->ssptr = (u32)TIL_ALIAS_ADDR(((area.x0 << 6) |
+ (area.y0 << 20)), mode);
+ } else {
+ blk->ssptr = (u32)TIL_ALIAS_ADDR(((area.x0 << 7) |
+ (area.y0 << 20)), mode);
+ blk->stride <<= 1;
+ blk->dim.area.height >>= 1;
+ if (blk->fmt == TILFMT_32BIT)
+ blk->dim.area.width >>= 1;
+ }
+ blk->stride = (blk->stride + TILER_PAGE - 1) &
+ ~(TILER_PAGE - 1);
+ }
+ return 0;
+
+error:
+ blk->fmt = TILFMT_INVALID;
+ blk->dim.len = blk->stride = blk->ssptr = 0;
+ return -EFAULT;
+}
+
+static s32 tiler_ioctl(struct inode *ip, struct file *filp, u32 cmd,
+ unsigned long arg)
+{
+ pgd_t *pgd = NULL;
+ pmd_t *pmd = NULL;
+ pte_t *ptep = NULL, pte = 0x0;
+ s32 r = -1;
+ u32 til_addr = 0x0;
+
+ struct __buf_info *_b = NULL;
+ struct tiler_buf_info buf_info = {0};
+ struct tiler_block_info block_info = {0};
+ struct list_head *pos = NULL, *q = NULL;
+
+ switch (cmd) {
+ case TILIOC_GBUF:
+ if (copy_from_user(&block_info, (void __user *)arg,
+ sizeof(struct tiler_block_info)))
+ return -EFAULT;
+
+ switch (block_info.fmt) {
+ case TILFMT_PAGE:
+ r = tiler_alloc(block_info.fmt, block_info.dim.len, 1,
+ &til_addr);
+ if (r)
+ return r;
+ break;
+ case TILFMT_8BIT:
+ case TILFMT_16BIT:
+ case TILFMT_32BIT:
+ r = tiler_alloc(block_info.fmt,
+ block_info.dim.area.width,
+ block_info.dim.area.height, &til_addr);
+ if (r)
+ return r;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ block_info.ssptr = til_addr;
+ if (copy_to_user((void __user *)arg, &block_info,
+ sizeof(struct tiler_block_info)))
+ return -EFAULT;
+ break;
+ case TILIOC_FBUF:
+ case TILIOC_UMBUF:
+ if (copy_from_user(&block_info, (void __user *)arg,
+ sizeof(struct tiler_block_info)))
+ return -EFAULT;
+
+ if (tiler_free(block_info.ssptr))
+ return -EFAULT;
+ break;
+ case TILIOC_GSSP:
+ pgd = pgd_offset(current->mm, arg);
+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+ pmd = pmd_offset(pgd, arg);
+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+ ptep = pte_offset_map(pmd, arg);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return (pte & PAGE_MASK) |
+ (~PAGE_MASK & arg);
+ }
+ }
+ }
+ /* va not in page table */
+ return 0x0;
+ break;
+ case TILIOC_MBUF:
+ if (copy_from_user(&block_info, (void __user *)arg,
+ sizeof(struct tiler_block_info)))
+ return -EFAULT;
+
+ if (!block_info.ptr)
+ return -EFAULT;
+
+ if (map_buffer(block_info.fmt, block_info.dim.len, 1,
+ &block_info.ssptr, (u32)block_info.ptr))
+ return -ENOMEM;
+
+ if (copy_to_user((void __user *)arg, &block_info,
+ sizeof(struct tiler_block_info)))
+ return -EFAULT;
+ break;
+ case TILIOC_QBUF:
+ if (copy_from_user(&buf_info, (void __user *)arg,
+ sizeof(struct tiler_buf_info)))
+ return -EFAULT;
+
+ mutex_lock(&mtx);
+ list_for_each(pos, &buf_list.list) {
+ _b = list_entry(pos, struct __buf_info, list);
+ if (buf_info.offset == _b->buf_info.offset) {
+ if (copy_to_user((void __user *)arg,
+ &_b->buf_info,
+ sizeof(struct tiler_buf_info))) {
+ mutex_unlock(&mtx);
+ return -EFAULT;
+ } else {
+ mutex_unlock(&mtx);
+ return 0;
+ }
+ }
+ }
+ mutex_unlock(&mtx);
+ return -EFAULT;
+ break;
+ case TILIOC_RBUF:
+ _b = kmalloc(sizeof(struct __buf_info), GFP_KERNEL);
+ if (!_b)
+ return -ENOMEM;
+ memset(_b, 0x0, sizeof(struct __buf_info));
+
+ if (copy_from_user(&_b->buf_info, (void __user *)arg,
+ sizeof(struct tiler_buf_info))) {
+			kfree(_b);
+			return -EFAULT;
+ }
+
+ _b->buf_info.offset = id;
+ id += 0x1000;
+
+ mutex_lock(&mtx);
+ list_add(&(_b->list), &buf_list.list);
+ mutex_unlock(&mtx);
+
+ if (copy_to_user((void __user *)arg, &_b->buf_info,
+ sizeof(struct tiler_buf_info))) {
+			kfree(_b);
+			return -EFAULT;
+ }
+ break;
+ case TILIOC_URBUF:
+ if (copy_from_user(&buf_info, (void __user *)arg,
+ sizeof(struct tiler_buf_info)))
+ return -EFAULT;
+
+ mutex_lock(&mtx);
+ list_for_each_safe(pos, q, &buf_list.list) {
+ _b = list_entry(pos, struct __buf_info, list);
+ if (buf_info.offset == _b->buf_info.offset) {
+ list_del(pos);
+ kfree(_b);
+ mutex_unlock(&mtx);
+ return 0;
+ }
+ }
+ mutex_unlock(&mtx);
+ return -EFAULT;
+ break;
+ case TILIOC_QUERY_BLK:
+ if (copy_from_user(&block_info, (void __user *)arg,
+ sizeof(struct tiler_block_info)))
+ return -EFAULT;
+
+ if (tiler_find_buf(block_info.ssptr, &block_info))
+ return -EFAULT;
+
+ if (copy_to_user((void __user *)arg, &block_info,
+ sizeof(struct tiler_block_info)))
+ return -EFAULT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0x0;
+}
+
+s32 tiler_alloc(enum tiler_fmt fmt, u32 width, u32 height, u32 *sys_addr)
+{
+ u16 num_page = 0, x_page = 0, y_page = 0;
+ u8 x_area = 0, y_area = 0;
+ struct pat pat_desc = {0};
+ struct mem_info *mi = NULL;
+
+ /* reserve area in tiler container */
+ if (__set_area(fmt, width, height, &x_area, &y_area))
+ return -EFAULT;
+
+ mi = kmalloc(sizeof(struct mem_info), GFP_KERNEL);
+ if (!mi)
+ return -ENOMEM;
+ memset(mi, 0x0, sizeof(struct mem_info));
+
+ switch (fmt) {
+ case TILFMT_8BIT:
+ case TILFMT_16BIT:
+ case TILFMT_32BIT:
+ if (allocate_2d_area(x_area + 1, y_area + 1, ALIGN_64,
+				&mi->area)) {
+			kfree(mi);
+			return -ENOMEM;
+		}
+ break;
+ case TILFMT_PAGE:
+ if (allocate_1d_pages(
+ ROUND_UP((x_area + 1) * (y_area + 1), 256),
+				&mi->area)) {
+			kfree(mi);
+			return -ENOMEM;
+		}
+ break;
+ default:
+ kfree(mi);
+ return -ENOMEM;
+ }
+
+ /* formulate system space address */
+ *sys_addr = __get_alias_addr(fmt, mi->area.x0, mi->area.y0);
+
+ /* allocate pages */
+ /* TODO: page count should be u8 @ 256 max */
+ /* TODO: num_pages should be u16 @ 32k max */
+ x_page = mi->area.x1 - mi->area.x0 + 1;
+ y_page = mi->area.y1 - mi->area.y0 + 1;
+ num_page = x_page * y_page;
+
+ mi->size = num_page * 4;
+ mi->page = dma_alloc_coherent(NULL, mi->size, &mi->page_pa, GFP_ATOMIC);
+ if (!mi->page) {
+		kfree(mi);
+		return -ENOMEM;
+ }
+ memset(mi->page, 0x0, mi->size);
+
+ mi->mem = dmm_get_pages(num_page);
+ if (!mi->mem)
+ goto cleanup;
+ memcpy(mi->page, mi->mem, mi->size);
+
+ mi->num_pg = num_page;
+ mi->sys_addr = *sys_addr;
+
+ mutex_lock(&mtx);
+ list_add(&(mi->list), &mem_list.list);
+ mutex_unlock(&mtx);
+
+ /* send pat descriptor to dmm driver */
+ pat_desc.area.x0 = mi->area.x0;
+ pat_desc.area.y0 = mi->area.y0;
+ pat_desc.area.x1 = mi->area.x1;
+ pat_desc.area.y1 = mi->area.y1;
+ pat_desc.ctrl.dir = 0;
+ pat_desc.ctrl.ini = 0;
+ pat_desc.ctrl.lut_id = 0;
+ pat_desc.ctrl.start = 1;
+ pat_desc.ctrl.sync = 0;
+ pat_desc.next = NULL;
+
+ /* must be a 16-byte aligned physical address */
+ pat_desc.data = mi->page_pa;
+
+ if (dmm_pat_refill(&pat_desc, MANUAL))
+ goto cleanup;
+
+ return 0x0;
+cleanup:
+ dma_free_coherent(NULL, mi->size, mi->page, mi->page_pa);
+ kfree(mi);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(tiler_alloc);
+
+static void __exit tiler_exit(void)
+{
+ struct __buf_info *_b = NULL;
+ struct mem_info *mi = NULL;
+ struct list_head *pos = NULL, *q = NULL;
+ u32 i = -1;
+
+ deinit_tiler();
+
+ /* remove any leftover info structs that haven't been unregistered */
+ mutex_lock(&mtx);
+ pos = NULL, q = NULL;
+ list_for_each_safe(pos, q, &buf_list.list) {
+ _b = list_entry(pos, struct __buf_info, list);
+ list_del(pos);
+ kfree(_b);
+ }
+ mutex_unlock(&mtx);
+
+ /* free any remaining mem structures */
+ mutex_lock(&mtx);
+ pos = NULL, q = NULL;
+ list_for_each_safe(pos, q, &mem_list.list) {
+ mi = list_entry(pos, struct mem_info, list);
+ for (i = 0; i < mi->num_pg; i++)
+ dmm_free_page(mi->page[i]);
+ dma_free_coherent(NULL, mi->size, mi->page, mi->page_pa);
+ list_del(pos);
+ kfree(mi);
+ }
+ mutex_unlock(&mtx);
+
+ mutex_destroy(&mtx);
+ platform_driver_unregister(&tiler_driver_ldm);
+ cdev_del(&tiler_device->cdev);
+ kfree(tiler_device);
+ device_destroy(tilerdev_class, MKDEV(tiler_major, tiler_minor));
+ class_destroy(tilerdev_class);
+}
+
+static s32 tiler_open(struct inode *ip, struct file *filp)
+{
+ return 0x0;
+}
+
+static s32 tiler_release(struct inode *ip, struct file *filp)
+{
+ return 0x0;
+}
+
+static const struct file_operations tiler_fops = {
+ .open = tiler_open,
+ .ioctl = tiler_ioctl,
+ .release = tiler_release,
+ .mmap = tiler_mmap,
+};
+
+static s32 __init tiler_init(void)
+{
+ dev_t dev = 0;
+ s32 r = -1;
+ struct device *device = NULL;
+
+ if (tiler_major) {
+ dev = MKDEV(tiler_major, tiler_minor);
+ r = register_chrdev_region(dev, 1, "tiler");
+ } else {
+ r = alloc_chrdev_region(&dev, tiler_minor, 1, "tiler");
+ tiler_major = MAJOR(dev);
+ }
+
+ tiler_device = kmalloc(sizeof(struct tiler_dev), GFP_KERNEL);
+ if (!tiler_device) {
+ unregister_chrdev_region(dev, 1);
+ return -ENOMEM;
+ }
+ memset(tiler_device, 0x0, sizeof(struct tiler_dev));
+
+ cdev_init(&tiler_device->cdev, &tiler_fops);
+ tiler_device->cdev.owner = THIS_MODULE;
+ tiler_device->cdev.ops = &tiler_fops;
+
+ r = cdev_add(&tiler_device->cdev, dev, 1);
+ if (r)
+ printk(KERN_ERR "cdev_add():failed\n");
+
+ tilerdev_class = class_create(THIS_MODULE, "tiler");
+
+ if (IS_ERR(tilerdev_class)) {
+		printk(KERN_ERR "class_create(): failed\n");
+		r = PTR_ERR(tilerdev_class);
+		goto EXIT;
+ }
+
+ device = device_create(tilerdev_class, NULL, dev, NULL, "tiler");
+	if (IS_ERR(device))
+		printk(KERN_ERR "device_create() failed\n");
+
+ r = platform_driver_register(&tiler_driver_ldm);
+
+ mutex_init(&mtx);
+ INIT_LIST_HEAD(&buf_list.list);
+ INIT_LIST_HEAD(&mem_list.list);
+ id = 0xda7a000;
+ init_tiler();
+
+EXIT:
+ return r;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("davidsin@ti.com");
+module_init(tiler_init);
+module_exit(tiler_exit);
diff --git a/drivers/media/video/tiler/tiler.h b/drivers/media/video/tiler/tiler.h
new file mode 100644
index 000000000000..b8adf1439b6f
--- /dev/null
+++ b/drivers/media/video/tiler/tiler.h
@@ -0,0 +1,116 @@
+/*
+ * tiler.h
+ *
+ * TILER driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef TILER_H
+#define TILER_H
+
+#define TILER_PAGE 0x1000
+#define TILER_WIDTH 256
+#define TILER_HEIGHT 128
+#define TILER_BLOCK_WIDTH 64
+#define TILER_BLOCK_HEIGHT 64
+#define TILER_LENGTH (TILER_WIDTH * TILER_HEIGHT * TILER_PAGE)
+
+#define TILER_MAX_NUM_BLOCKS 16
+
+#define TILIOC_GBUF _IOWR('z', 100, u32)
+#define TILIOC_FBUF _IOWR('z', 101, u32)
+#define TILIOC_GSSP _IOWR('z', 102, u32)
+#define TILIOC_MBUF _IOWR('z', 103, u32)
+#define TILIOC_UMBUF _IOWR('z', 104, u32)
+#define TILIOC_QBUF _IOWR('z', 105, u32)
+#define TILIOC_RBUF _IOWR('z', 106, u32)
+#define TILIOC_URBUF _IOWR('z', 107, u32)
+#define TILIOC_QUERY_BLK _IOWR('z', 108, u32)
+
+enum tiler_fmt {
+ TILFMT_MIN = -1,
+ TILFMT_INVALID = -1,
+ TILFMT_NONE = 0,
+ TILFMT_8BIT = 1,
+ TILFMT_16BIT = 2,
+ TILFMT_32BIT = 3,
+ TILFMT_PAGE = 4,
+ TILFMT_MAX = 4
+};
+
+struct area {
+ u16 width;
+ u16 height;
+};
+
+struct tiler_block_info {
+ enum tiler_fmt fmt;
+ union {
+ struct area area;
+ u32 len;
+ } dim;
+ u32 stride;
+ void *ptr;
+ u32 ssptr;
+};
+
+struct tiler_buf_info {
+ s32 num_blocks;
+ struct tiler_block_info blocks[TILER_MAX_NUM_BLOCKS];
+ s32 offset;
+};
+
+struct tiler_view_orient {
+ u8 rotate_90;
+ u8 x_invert;
+ u8 y_invert;
+};
+
+/**
+ * Request a 1-D or 2-D TILER buffer.
+ *
+ * @param fmt TILER bit mode.
+ * @param width buffer width.
+ * @param height buffer height.
+ * @param sys_addr system space (L3) address.
+ *
+ * @return an error status.
+ */
+s32 tiler_alloc(enum tiler_fmt fmt, u32 width, u32 height, u32 *sys_addr);
+
+/**
+ * Free TILER memory.
+ * @param sys_addr system space (L3) address.
+ * @return an error status.
+ */
+s32 tiler_free(u32 sys_addr);
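+/*
+ * Minimal in-kernel usage sketch (illustrative only; error handling and the
+ * surrounding driver context are omitted):
+ *
+ *	u32 ssptr = 0;
+ *
+ *	if (tiler_alloc(TILFMT_16BIT, 640, 480, &ssptr))
+ *		return -ENOMEM;
+ *	... program the returned system-space address into hardware ...
+ *	tiler_free(ssptr);
+ */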
+
+u32 tiler_reorient_addr(u32 tsptr, struct tiler_view_orient orient);
+
+u32 tiler_get_natural_addr(void *sys_ptr);
+
+u32 tiler_reorient_topleft(u32 tsptr, struct tiler_view_orient orient,
+ u32 width, u32 height);
+
+u32 tiler_stride(u32 tsptr);
+
+void tiler_rotate_view(struct tiler_view_orient *orient, u32 rotation);
+
+void tiler_alloc_packed(s32 *count, enum tiler_fmt fmt, u32 width, u32 height,
+ void **sysptr, void **allocptr, s32 aligned);
+
+void tiler_alloc_packed_nv12(s32 *count, u32 width, u32 height, void **y_sysptr,
+ void **uv_sysptr, void **y_allocptr,
+ void **uv_allocptr, s32 aligned);
+
+#endif
+
diff --git a/drivers/media/video/tiler/tiler_def.h b/drivers/media/video/tiler/tiler_def.h
new file mode 100644
index 000000000000..de73b8eccb42
--- /dev/null
+++ b/drivers/media/video/tiler/tiler_def.h
@@ -0,0 +1,158 @@
+/*
+ * tiler_def.h
+ *
+ * TILER driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef TILER_DEF_H
+#define TILER_DEF_H
+
+#define ROUND_UP_2P(a, b) (((a) + (b) - 1) & ~((b) - 1))
+#define DIVIDE_UP(a, b) (((a) + (b) - 1) / (b))
+#define ROUND_UP(a, b) (DIVIDE_UP(a, b) * (b))
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define TILER_ACC_MODE_SHIFT (27)
+#define TILER_ACC_MODE_MASK (3)
+#define TILER_GET_ACC_MODE(x) ((enum tiler_fmt)\
+(((u32)x & (TILER_ACC_MODE_MASK<<TILER_ACC_MODE_SHIFT))>>TILER_ACC_MODE_SHIFT))
+
+#define TILER_ALIAS_BASE (0x60000000)
+#define TILER_ACC_MODE_SHIFT (27)
+#define DMM_ACC_MODE_SHIFT (27)
+
+#define TIL_ALIAS_ADDR(x, access_mode)\
+((void *)(TILER_ALIAS_BASE | (u32)x | (access_mode << TILER_ACC_MODE_SHIFT)))
+
+#define TIL_ADDR(x, r, yi, xi, a)\
+((void *)((u32)x | (r << DMM_ROTATION_SHIFT) |\
+(yi << DMM_Y_INVERT_SHIFT) | (xi << DMM_X_INVERT_SHIFT) |\
+(a << DMM_ACC_MODE_SHIFT)))
+
+#define TILER_ALIAS_VIEW_CLEAR (~0xE0000000)
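+/*
+ * Worked example (follows from the macros above): TILER_ALIAS_BASE sets
+ * bits 30:29 and the access mode occupies bits 28:27, so the four container
+ * views start at 0x60000000 (mode 0, 8-bit), 0x68000000 (mode 1, 16-bit),
+ * 0x70000000 (mode 2, 32-bit) and 0x78000000 (mode 3, page mode), and
+ * TILER_GET_ACC_MODE() recovers the mode by masking those two bits.
+ */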
+
+#define DMM_X_INVERT_SHIFT (29)
+#define DMM_GET_X_INVERTED(x) ((((u32)x & (1<<DMM_X_INVERT_SHIFT)) > 0) ? 1 : 0)
+#define DMM_Y_INVERT_SHIFT (30)
+#define DMM_GET_Y_INVERTED(x) ((((u32)x & (1<<DMM_Y_INVERT_SHIFT)) > 0) ? 1 : 0)
+
+#define DMM_ROTATION_SHIFT (31)
+#define DMM_GET_ROTATED(x)\
+((((u32)x & ((u32)1<<DMM_ROTATION_SHIFT)) > 0) ? 1 : 0)
+
+#define DMM_ALIAS_VIEW_CLEAR (~0xE0000000)
+
+#define DMM_TILE_DIMM_X_MODE_8 (32)
+#define DMM_TILE_DIMM_Y_MODE_8 (32)
+
+#define DMM_TILE_DIMM_X_MODE_16 (32)
+#define DMM_TILE_DIMM_Y_MODE_16 (16)
+
+#define DMM_TILE_DIMM_X_MODE_32 (16)
+#define DMM_TILE_DIMM_Y_MODE_32 (16)
+
+#define DMM_PAGE_DIMM_X_MODE_8 (DMM_TILE_DIMM_X_MODE_8*2)
+#define DMM_PAGE_DIMM_Y_MODE_8 (DMM_TILE_DIMM_Y_MODE_8*2)
+
+#define DMM_PAGE_DIMM_X_MODE_16 (DMM_TILE_DIMM_X_MODE_16*2)
+#define DMM_PAGE_DIMM_Y_MODE_16 (DMM_TILE_DIMM_Y_MODE_16*2)
+
+#define DMM_PAGE_DIMM_X_MODE_32 (DMM_TILE_DIMM_X_MODE_32*2)
+#define DMM_PAGE_DIMM_Y_MODE_32 (DMM_TILE_DIMM_Y_MODE_32*2)
+
+#define DMM_HOR_X_ADDRSHIFT_8 (0)
+#define DMM_HOR_X_ADDRMASK_8 (0x3FFF)
+#define DMM_HOR_X_COOR_GET_8(x)\
+ (((unsigned long)x >> DMM_HOR_X_ADDRSHIFT_8) & DMM_HOR_X_ADDRMASK_8)
+#define DMM_HOR_X_PAGE_COOR_GET_8(x)\
+ (DMM_HOR_X_COOR_GET_8(x)/DMM_PAGE_DIMM_X_MODE_8)
+
+#define DMM_HOR_Y_ADDRSHIFT_8 (14)
+#define DMM_HOR_Y_ADDRMASK_8 (0x1FFF)
+#define DMM_HOR_Y_COOR_GET_8(x)\
+ (((unsigned long)x >> DMM_HOR_Y_ADDRSHIFT_8) & DMM_HOR_Y_ADDRMASK_8)
+#define DMM_HOR_Y_PAGE_COOR_GET_8(x)\
+ (DMM_HOR_Y_COOR_GET_8(x)/DMM_PAGE_DIMM_Y_MODE_8)
+
+#define DMM_HOR_X_ADDRSHIFT_16 (1)
+#define DMM_HOR_X_ADDRMASK_16 (0x7FFE)
+#define DMM_HOR_X_COOR_GET_16(x) (((unsigned long)x >> \
+ DMM_HOR_X_ADDRSHIFT_16) & DMM_HOR_X_ADDRMASK_16)
+#define DMM_HOR_X_PAGE_COOR_GET_16(x) (DMM_HOR_X_COOR_GET_16(x) / \
+ DMM_PAGE_DIMM_X_MODE_16)
+
+#define DMM_HOR_Y_ADDRSHIFT_16 (15)
+#define DMM_HOR_Y_ADDRMASK_16 (0xFFF)
+#define DMM_HOR_Y_COOR_GET_16(x) (((unsigned long)x >> \
+ DMM_HOR_Y_ADDRSHIFT_16) & DMM_HOR_Y_ADDRMASK_16)
+#define DMM_HOR_Y_PAGE_COOR_GET_16(x) (DMM_HOR_Y_COOR_GET_16(x) / \
+ DMM_PAGE_DIMM_Y_MODE_16)
+
+#define DMM_HOR_X_ADDRSHIFT_32 (2)
+#define DMM_HOR_X_ADDRMASK_32 (0x7FFC)
+#define DMM_HOR_X_COOR_GET_32(x) (((unsigned long)x >> \
+ DMM_HOR_X_ADDRSHIFT_32) & DMM_HOR_X_ADDRMASK_32)
+#define DMM_HOR_X_PAGE_COOR_GET_32(x) (DMM_HOR_X_COOR_GET_32(x) / \
+ DMM_PAGE_DIMM_X_MODE_32)
+
+#define DMM_HOR_Y_ADDRSHIFT_32 (15)
+#define DMM_HOR_Y_ADDRMASK_32 (0xFFF)
+#define DMM_HOR_Y_COOR_GET_32(x) (((unsigned long)x >> \
+ DMM_HOR_Y_ADDRSHIFT_32) & DMM_HOR_Y_ADDRMASK_32)
+#define DMM_HOR_Y_PAGE_COOR_GET_32(x) (DMM_HOR_Y_COOR_GET_32(x) / \
+ DMM_PAGE_DIMM_Y_MODE_32)
+
+#define DMM_VER_X_ADDRSHIFT_8 (14)
+#define DMM_VER_X_ADDRMASK_8 (0x1FFF)
+#define DMM_VER_X_COOR_GET_8(x)\
+ (((unsigned long)x >> DMM_VER_X_ADDRSHIFT_8) & DMM_VER_X_ADDRMASK_8)
+#define DMM_VER_X_PAGE_COOR_GET_8(x)\
+ (DMM_VER_X_COOR_GET_8(x)/DMM_PAGE_DIMM_X_MODE_8)
+
+#define DMM_VER_Y_ADDRSHIFT_8 (0)
+#define DMM_VER_Y_ADDRMASK_8 (0x3FFF)
+#define DMM_VER_Y_COOR_GET_8(x)\
+ (((unsigned long)x >> DMM_VER_Y_ADDRSHIFT_8) & DMM_VER_Y_ADDRMASK_8)
+#define DMM_VER_Y_PAGE_COOR_GET_8(x)\
+ (DMM_VER_Y_COOR_GET_8(x)/DMM_PAGE_DIMM_Y_MODE_8)
+
+#define DMM_VER_X_ADDRSHIFT_16 (14)
+#define DMM_VER_X_ADDRMASK_16 (0x1FFF)
+#define DMM_VER_X_COOR_GET_16(x) (((unsigned long)x >> \
+ DMM_VER_X_ADDRSHIFT_16) & DMM_VER_X_ADDRMASK_16)
+#define DMM_VER_X_PAGE_COOR_GET_16(x) (DMM_VER_X_COOR_GET_16(x) / \
+ DMM_PAGE_DIMM_X_MODE_16)
+
+#define DMM_VER_Y_ADDRSHIFT_16 (0)
+#define DMM_VER_Y_ADDRMASK_16 (0x3FFF)
+#define DMM_VER_Y_COOR_GET_16(x) (((unsigned long)x >> \
+ DMM_VER_Y_ADDRSHIFT_16) & DMM_VER_Y_ADDRMASK_16)
+#define DMM_VER_Y_PAGE_COOR_GET_16(x) (DMM_VER_Y_COOR_GET_16(x) / \
+ DMM_PAGE_DIMM_Y_MODE_16)
+
+#define DMM_VER_X_ADDRSHIFT_32 (15)
+#define DMM_VER_X_ADDRMASK_32 (0xFFF)
+#define DMM_VER_X_COOR_GET_32(x) (((unsigned long)x >> \
+ DMM_VER_X_ADDRSHIFT_32) & DMM_VER_X_ADDRMASK_32)
+#define DMM_VER_X_PAGE_COOR_GET_32(x) (DMM_VER_X_COOR_GET_32(x) / \
+ DMM_PAGE_DIMM_X_MODE_32)
+
+#define DMM_VER_Y_ADDRSHIFT_32 (0)
+#define DMM_VER_Y_ADDRMASK_32 (0x7FFF)
+#define DMM_VER_Y_COOR_GET_32(x) (((unsigned long)x >> \
+ DMM_VER_Y_ADDRSHIFT_32) & DMM_VER_Y_ADDRMASK_32)
+#define DMM_VER_Y_PAGE_COOR_GET_32(x) (DMM_VER_Y_COOR_GET_32(x) / \
+ DMM_PAGE_DIMM_Y_MODE_32)
+
+#endif
diff --git a/drivers/media/video/tiler/tiler_pack.c b/drivers/media/video/tiler/tiler_pack.c
new file mode 100644
index 000000000000..7c3c6d95ca0f
--- /dev/null
+++ b/drivers/media/video/tiler/tiler_pack.c
@@ -0,0 +1,269 @@
+/*
+ * tiler_pack.c
+ *
+ * TILER driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include "tiler.h"
+#include "tiler_def.h"
+
+void tiler_alloc_packed(s32 *count, enum tiler_fmt fmt, u32 width, u32 height,
+ void **sysptr, void **allocptr, s32 aligned)
+{
+ int til_width, bpp, bpt, buf_width, alloc_width, map_width;
+ int buf_map_width, n_per_m, m_per_a, i = 0, m, n;
+
+ /* Check input parameters for correctness */
+ if (!width || !height || !sysptr || !allocptr || !count ||
+ *count <= 0 || fmt < TILFMT_8BIT || fmt > TILFMT_32BIT) {
+ if (count)
+ *count = 0;
+ return;
+ }
+
+ /* tiler page width in pixels, bytes per pixel, tiler page in bytes */
+ til_width = fmt == TILFMT_32BIT ? 32 : 64;
+ bpp = 1 << (fmt - TILFMT_8BIT);
+ bpt = til_width * bpp;
+
+ /* width of buffer in tiled pages */
+ buf_width = DIVIDE_UP(width, til_width);
+
+ /* :TODO: for now tiler allocation width is 64-multiple */
+ alloc_width = ROUND_UP_2P(buf_width, 64);
+ map_width = TILER_PAGE / bpt;
+
+ /* ensure alignment if needed */
+ buf_map_width = ROUND_UP_2P(buf_width, map_width);
+
+ /* number of buffers in a map window */
+ n_per_m = aligned ? 1 : (buf_map_width / buf_width);
+
+ /* number of map windows per allocation */
+ m_per_a = alloc_width / buf_map_width;
+
+ printk(KERN_INFO "packing %d*%d buffers into an allocation\n",
+ n_per_m, m_per_a);
+
+ while (i < *count) {
+ /* allocate an area wide enough to fit the remaining buffers */
+ int n_alloc, m_alloc, tiles, res;
+ void *base;
+
+ n_alloc = MIN(*count - i, m_per_a * n_per_m);
+ m_alloc = DIVIDE_UP(n_alloc, n_per_m);
+ tiles = ((m_alloc - 1) * map_width +
+ buf_width * (n_alloc - (m_alloc - 1) * m_per_a));
+
+ res = tiler_alloc(fmt, til_width * tiles, height,
+ (u32 *)sysptr + i);
+ if (res != 0)
+ break;
+
+ /* mark allocation */
+ base = allocptr[i] = sysptr[i];
+ i++;
+
+ /* portion out remaining buffers */
+ for (m = 0; m < m_per_a; m++, base += bpt * buf_map_width) {
+ for (n = 0; n < n_per_m; n++) {
+ /* first buffer is already allocated */
+ if (n + m == 0)
+ continue;
+
+ /* stop if we are done */
+ if (i == *count)
+ break;
+
+ /* set buffer address */
+ sysptr[i] = base + bpt * n * buf_width;
+ allocptr[i++] = NULL;
+ }
+ }
+ }
+
+ /* mark how many buffers we allocated */
+ *count = i;
+}
+EXPORT_SYMBOL(tiler_alloc_packed);
+
+static int layout_packed_nv12(char *offsets, int y_width, int uv_width,
+ void **buf, int blocks, int i,
+ void **y_sysptr, void **uv_sysptr,
+ void **y_allocptr, void **uv_allocptr)
+{
+ int j;
+ for (j = 0; j < blocks; j++, offsets += 3) {
+ int page_offset = (63 & (int) offsets[0])
+ + y_width * ((int) offsets[1])
+ + uv_width * (int) offsets[2];
+ void *base = buf[offsets[0] >> 6] + 64 * page_offset;
+
+ if (j & 1) {
+ /* convert 8-bit to 16-bit view */
+ /* this formula only works for even ys */
+ uv_sysptr[i] = base + (0x3FFF & (unsigned long) base)
+ + 0x8000000;
+ uv_allocptr[i] = page_offset ? NULL : uv_sysptr[i];
+ i++;
+ } else {
+ y_sysptr[i] = base;
+ y_allocptr[i] = page_offset ? NULL : y_sysptr[i];
+ }
+ }
+ return i;
+}
+
+void tiler_alloc_packed_nv12(s32 *count, u32 width, u32 height, void **y_sysptr,
+ void **uv_sysptr, void **y_allocptr,
+ void **uv_allocptr, s32 aligned)
+{
+ /* optimized packing table */
+ /* We read this table from beginning to end and check whether an
+ entry meets our requirements (e.g. allocating at least i buffers,
+ with a maximum Y width of w, and alignment a). If it does not, we
+ move on to the next entry; otherwise we do the allocation. The
+ table is constructed so that if an interim tiler allocation fails,
+ the next matching rule for the scenario can still use the buffers
+ already allocated. */
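+ /*
+ * Roughly, each rule reads: { min buffers still needed, max Y width
+ * in tiler pages (0 = any), alignment flag, number of 8-bit
+ * containers to allocate }, followed by one 3-byte width per
+ * container and one 3-byte tiler-page offset per Y and UV block,
+ * both in the a + b * w(y) + c * w(uv) form. The bits above the low
+ * six of an offset's first byte select which container the block
+ * lands in. The first rule, for instance, packs five NV12 buffers
+ * (Y width <= 16 pages, unaligned) into two 64-page containers.
+ */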
+
+#define MAX_BUFS_TO_PACK 3
+ void *buf[MAX_BUFS_TO_PACK];
+ int n_buf, buf_w[MAX_BUFS_TO_PACK];
+
+ char packing[] = {
+ /* min(i), max(w), aligned, buffers to alloc */
+ 5, 16, 0, 2,
+ /* buffer widths in a + b * w(y) + c * w(uv) */
+ 64, 0, 0, 64, 0, 0,
+ /* tiler-page offsets in
+ a + b * w(y) + c * w(uv) */
+ 0, 0, 0, 32, 0, 0,
+ 16, 0, 0, 40, 0, 0,
+ 64, 0, 0, 96, 0, 0,
+ 80, 0, 0, 104, 0, 0,
+ 112, 0, 0, 56, 0, 0,
+
+ 2, 16, 0, 1,
+ 32, 0, 2,
+ 0, 0, 0, 32, 0, 0,
+ 0, 0, 2, 32, 0, 1,
+
+ 2, 20, 0, 1,
+ 42, 1, 0,
+ 0, 0, 0, 32, 0, 0,
+ 42, 0, 0, 21, 0, 0,
+
+ 3, 24, 0, 2,
+ 48, 0, 1, 32, 1, 0,
+ 0, 0, 0, 64, 0, 0,
+ 24, 0, 0, 76, 0, 0,
+ 96, 0, 0, 48, 0, 0,
+
+ 4, 32, 0, 3,
+ 48, 0, 1, 32, 1, 0, 32, 1, 0,
+ 0, 0, 0, 32, 0, 0,
+ 96, 0, 0, 48, 0, 0,
+ 64, 0, 0, 128, 0, 0,
+ 160, 0, 0, 144, 0, 0,
+
+ /* this is needed for soft landing if prior allocation fails
+ after two buffers */
+ 2, 32, 1, 2,
+ 32, 0, 1, 32, 0, 1,
+ 0, 0, 0, 32, 0, 0,
+ 64, 0, 0, 96, 0, 0,
+
+ 1, 32, 1, 1,
+ 32, 0, 1,
+ 0, 0, 0, 32, 0, 0,
+
+ 2, 64, 1, 3,
+ 0, 1, 0, 32, 0, 1, 0, 1, 0,
+ 0, 0, 0, 64, 0, 0,
+ 128, 0, 0, 96, 0, 0,
+ /* this is the basic NV12 allocation using 2 buffers */
+ 1, 0, 1, 2,
+ 0, 1, 0, 0, 0, 1,
+ 0, 0, 0, 64, 0, 0,
+ 0 };
+ int y_width, uv_width, i = 0;
+
+ /* Check input parameters for correctness */
+ if (!width || !height || !y_sysptr || !y_allocptr || !count ||
+ !uv_sysptr || !uv_allocptr || *count <= 0) {
+ if (count)
+ *count = 0;
+ return;
+ }
+
+ y_width = DIVIDE_UP(width, 64);
+ uv_width = DIVIDE_UP(width >> 1, 64);
+
+ while (i < *count) {
+ int n_alloc = *count - i;
+ char *p = packing;
+ n_buf = 0;
+
+ /* skip packings that do not apply */
+ while (*p) {
+ /* see if this packing applies */
+ if (p[0] <= n_alloc &&
+ (!p[1] || p[1] >= y_width) &&
+ (!aligned || p[2])) {
+
+ /* allocate buffers */
+ while (n_buf < p[3]) {
+ buf_w[n_buf] = p[4 + 3 * n_buf] +
+ y_width * p[5 + 3 * n_buf] +
+ uv_width * p[6 + 3 * n_buf];
+
+ if (0 != tiler_alloc(
+ TILFMT_8BIT, buf_w[n_buf] * 64,
+ height, (u32 *)buf + n_buf))
+ break;
+ n_buf++;
+ }
+
+ /* if successfully allocated buffers */
+ if (n_buf >= p[3]) {
+ i = layout_packed_nv12(p + 4 + 3 * p[3],
+ y_width,
+ uv_width,
+ buf, 2 * p[0], i,
+ y_sysptr,
+ uv_sysptr,
+ y_allocptr,
+ uv_allocptr);
+ break;
+ }
+ }
+
+ p += 4 + 3 * p[3] + 6 * p[0];
+ }
+
+ /* if allocation failed free any outstanding buffers and stop */
+ if (!*p) {
+ while (n_buf > 0)
+ tiler_free((unsigned long)(buf[--n_buf]));
+ break;
+ }
+ }
+
+ /* mark how many buffers we allocated */
+ *count = i;
+}
+EXPORT_SYMBOL(tiler_alloc_packed_nv12);
diff --git a/drivers/media/video/tiler/tiler_rot.c b/drivers/media/video/tiler/tiler_rot.c
new file mode 100644
index 000000000000..e82257e47d0f
--- /dev/null
+++ b/drivers/media/video/tiler/tiler_rot.c
@@ -0,0 +1,239 @@
+/*
+ * tiler_rot.c
+ *
+ * TILER driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include "tiler.h"
+#include "tiler_def.h"
+
+#define DMM_SHIFT_PER_X_8 0
+#define DMM_SHIFT_PER_Y_8 0
+#define DMM_SHIFT_PER_X_16 0
+#define DMM_SHIFT_PER_Y_16 1
+#define DMM_SHIFT_PER_X_32 1
+#define DMM_SHIFT_PER_Y_32 1
+#define DMM_SHIFT_PER_X_PAGE 6
+#define DMM_SHIFT_PER_Y_PAGE 6
+
+#define DMM_TILER_THE(NAME) (1 << DMM_TILER_##NAME##_BITS)
+#define DMM_TILER_THE_(N, NAME) (1 << DMM_TILER_##NAME##_BITS_(N))
+
+#define DMM_TILER_CONT_WIDTH_BITS 14
+#define DMM_TILER_CONT_HEIGHT_BITS 13
+
+#define DMM_SHIFT_PER_P_(N) (DMM_SHIFT_PER_X_##N + DMM_SHIFT_PER_Y_##N)
+
+#define DMM_TILER_CONT_HEIGHT_BITS_(N) \
+ (DMM_TILER_CONT_HEIGHT_BITS - DMM_SHIFT_PER_Y_##N)
+#define DMM_TILER_CONT_WIDTH_BITS_(N) \
+ (DMM_TILER_CONT_WIDTH_BITS - DMM_SHIFT_PER_X_##N)
+
+#define DMM_TILER_MASK(bits) ((1 << (bits)) - 1)
+
+#define DMM_TILER_GET_OFFSET_(N, var) \
+ ((((u32) var) & DMM_TILER_MASK(DMM_TILER_CONT_WIDTH_BITS + \
+ DMM_TILER_CONT_HEIGHT_BITS)) >> DMM_SHIFT_PER_P_(N))
+
+#define DMM_TILER_GET_0_X_(N, var) \
+ (DMM_TILER_GET_OFFSET_(N, var) & \
+ DMM_TILER_MASK(DMM_TILER_CONT_WIDTH_BITS_(N)))
+#define DMM_TILER_GET_0_Y_(N, var) \
+ (DMM_TILER_GET_OFFSET_(N, var) >> DMM_TILER_CONT_WIDTH_BITS_(N))
+#define DMM_TILER_GET_90_X_(N, var) \
+ (DMM_TILER_GET_OFFSET_(N, var) & \
+ DMM_TILER_MASK(DMM_TILER_CONT_HEIGHT_BITS_(N)))
+#define DMM_TILER_GET_90_Y_(N, var) \
+ (DMM_TILER_GET_OFFSET_(N, var) >> DMM_TILER_CONT_HEIGHT_BITS_(N))
+
+#define DMM_TILER_STRIDE_0_(N) \
+ (DMM_TILER_THE(CONT_WIDTH) << DMM_SHIFT_PER_Y_##N)
+#define DMM_TILER_STRIDE_90_(N) \
+ (DMM_TILER_THE(CONT_HEIGHT) << DMM_SHIFT_PER_X_##N)
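+
+/*
+ * With CONT_WIDTH_BITS = 14 and CONT_HEIGHT_BITS = 13 the 0-degree
+ * 8-bit container is 16384 bytes wide, so DMM_TILER_STRIDE_0_(8)
+ * evaluates to 16384 while DMM_TILER_STRIDE_0_(16) and
+ * DMM_TILER_STRIDE_0_(32) both evaluate to 32768 (strides in bytes).
+ */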
+
+void tiler_get_natural_xy(u32 tsptr, u32 *x, u32 *y)
+{
+ u32 x_bits, y_bits, offset;
+ enum tiler_fmt fmt;
+
+ fmt = TILER_GET_ACC_MODE(tsptr);
+
+ switch (fmt + 1) {
+ case TILFMT_8BIT:
+ x_bits = DMM_TILER_CONT_WIDTH_BITS_(8);
+ y_bits = DMM_TILER_CONT_HEIGHT_BITS_(8);
+ offset = DMM_TILER_GET_OFFSET_(8, tsptr);
+ break;
+ case TILFMT_16BIT:
+ x_bits = DMM_TILER_CONT_WIDTH_BITS_(16);
+ y_bits = DMM_TILER_CONT_HEIGHT_BITS_(16);
+ offset = DMM_TILER_GET_OFFSET_(16, tsptr);
+ break;
+ case TILFMT_32BIT:
+ x_bits = DMM_TILER_CONT_WIDTH_BITS_(32);
+ y_bits = DMM_TILER_CONT_HEIGHT_BITS_(32);
+ offset = DMM_TILER_GET_OFFSET_(32, tsptr);
+ break;
+ case TILFMT_PAGE:
+ default:
+ x_bits = DMM_TILER_CONT_WIDTH_BITS_(PAGE);
+ y_bits = DMM_TILER_CONT_HEIGHT_BITS_(PAGE);
+ offset = DMM_TILER_GET_OFFSET_(PAGE, tsptr);
+ break;
+ }
+
+ if (DMM_GET_ROTATED(tsptr)) {
+ *x = offset >> y_bits;
+ *y = offset & DMM_TILER_MASK(y_bits);
+ } else {
+ *x = offset & DMM_TILER_MASK(x_bits);
+ *y = offset >> x_bits;
+ }
+
+ if (DMM_GET_X_INVERTED(tsptr))
+ *x ^= DMM_TILER_MASK(x_bits);
+ if (DMM_GET_Y_INVERTED(tsptr))
+ *y ^= DMM_TILER_MASK(y_bits);
+}
+
+u32 tiler_get_address(struct tiler_view_orient orient,
+ enum tiler_fmt fmt, u32 x, u32 y)
+{
+ u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
+
+ switch (fmt + 1) {
+ case TILFMT_8BIT:
+ x_bits = DMM_TILER_CONT_WIDTH_BITS_(8);
+ y_bits = DMM_TILER_CONT_HEIGHT_BITS_(8);
+ alignment = DMM_SHIFT_PER_P_(8);
+ break;
+ case TILFMT_16BIT:
+ x_bits = DMM_TILER_CONT_WIDTH_BITS_(16);
+ y_bits = DMM_TILER_CONT_HEIGHT_BITS_(16);
+ alignment = DMM_SHIFT_PER_P_(16);
+ break;
+ case TILFMT_32BIT:
+ x_bits = DMM_TILER_CONT_WIDTH_BITS_(32);
+ y_bits = DMM_TILER_CONT_HEIGHT_BITS_(32);
+ alignment = DMM_SHIFT_PER_P_(32);
+ break;
+ case TILFMT_PAGE:
+ default:
+ x_bits = DMM_TILER_CONT_WIDTH_BITS_(PAGE);
+ y_bits = DMM_TILER_CONT_HEIGHT_BITS_(PAGE);
+ alignment = DMM_SHIFT_PER_P_(PAGE);
+ break;
+ }
+
+ x_mask = DMM_TILER_MASK(x_bits);
+ y_mask = DMM_TILER_MASK(y_bits);
+ if (x > x_mask || y > y_mask)
+ return 0;
+
+ if (orient.x_invert)
+ x ^= x_mask;
+ if (orient.y_invert)
+ y ^= y_mask;
+
+ if (orient.rotate_90)
+ tmp = ((x << y_bits) + y);
+ else
+ tmp = ((y << x_bits) + x);
+
+ return (u32)
+ TIL_ADDR((tmp << alignment), (orient.rotate_90 ? 1 : 0),
+ (orient.y_invert ? 1 : 0), (orient.x_invert ? 1 : 0),
+ fmt);
+}
+
+u32 tiler_reorient_addr(u32 tsptr, struct tiler_view_orient orient)
+{
+ u32 x, y;
+
+ tiler_get_natural_xy(tsptr, &x, &y);
+ return tiler_get_address(orient, TILER_GET_ACC_MODE(tsptr), x, y);
+}
+EXPORT_SYMBOL(tiler_reorient_addr);
+
+u32 tiler_get_natural_addr(void *sys_ptr)
+{
+ return (u32)sys_ptr & DMM_ALIAS_VIEW_CLEAR;
+}
+EXPORT_SYMBOL(tiler_get_natural_addr);
+
+u32 tiler_reorient_topleft(u32 tsptr, struct tiler_view_orient orient,
+ u32 width, u32 height)
+{
+ enum tiler_fmt fmt;
+ u32 x, y;
+
+ fmt = TILER_GET_ACC_MODE(tsptr);
+
+ tiler_get_natural_xy(tsptr, &x, &y);
+
+ if (DMM_GET_X_INVERTED(tsptr))
+ x -= width - 1;
+ if (DMM_GET_Y_INVERTED(tsptr))
+ y -= height - 1;
+
+ if (orient.x_invert)
+ x += width - 1;
+ if (orient.y_invert)
+ y += height - 1;
+
+ return tiler_get_address(orient, fmt, x, y);
+}
+EXPORT_SYMBOL(tiler_reorient_topleft);
+
+u32 tiler_stride(u32 tsptr)
+{
+ enum tiler_fmt fmt;
+
+ fmt = TILER_GET_ACC_MODE(tsptr);
+
+ switch (fmt + 1) {
+ case TILFMT_8BIT:
+ return DMM_GET_ROTATED(tsptr) ?
+ DMM_TILER_STRIDE_90_(8) : DMM_TILER_STRIDE_0_(8);
+ case TILFMT_16BIT:
+ return DMM_GET_ROTATED(tsptr) ?
+ DMM_TILER_STRIDE_90_(16) : DMM_TILER_STRIDE_0_(16);
+ case TILFMT_32BIT:
+ return DMM_GET_ROTATED(tsptr) ?
+ DMM_TILER_STRIDE_90_(32) : DMM_TILER_STRIDE_0_(32);
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(tiler_stride);
+
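+/*
+ * Starting from the natural (unrotated, uninverted) orientation the
+ * combined flags after calling this are, for example:
+ *   90  -> rotate_90 + x_invert
+ *   180 -> x_invert + y_invert
+ *   270 -> rotate_90 + y_invert
+ */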
+void tiler_rotate_view(struct tiler_view_orient *orient, u32 rotation)
+{
+ rotation = (rotation / 90) & 3;
+
+ if (rotation & 2) {
+ orient->x_invert = !orient->x_invert;
+ orient->y_invert = !orient->y_invert;
+ }
+
+ if (rotation & 1) {
+ if (orient->rotate_90)
+ orient->y_invert = !orient->y_invert;
+ else
+ orient->x_invert = !orient->x_invert;
+ orient->rotate_90 = !orient->rotate_90;
+ }
+}
+EXPORT_SYMBOL(tiler_rotate_view);
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 4b11257c3184..032f0b21ee7d 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -283,6 +283,10 @@ static const char *v4l2_ioctls[] = {
[_IOC_NR(VIDIOC_DBG_G_CHIP_IDENT)] = "VIDIOC_DBG_G_CHIP_IDENT",
[_IOC_NR(VIDIOC_S_HW_FREQ_SEEK)] = "VIDIOC_S_HW_FREQ_SEEK",
+
+ [_IOC_NR(VIDIOC_S_COL_SPC_CONV)] = "VIDIOC_S_COL_SPC_CONV",
+ [_IOC_NR(VIDIOC_G_COL_SPC_CONV)] = "VIDIOC_G_COL_SPC_CONV",
+
#endif
[_IOC_NR(VIDIOC_ENUM_DV_PRESETS)] = "VIDIOC_ENUM_DV_PRESETS",
[_IOC_NR(VIDIOC_S_DV_PRESET)] = "VIDIOC_S_DV_PRESET",
@@ -1942,6 +1946,26 @@ static long __video_do_ioctl(struct file *file,
break;
}
+ /*---------------Color space conversion------------------------------*/
+ case VIDIOC_S_COL_SPC_CONV:
+ {
+ struct v4l2_color_space_conversion *p = arg;
+ if (!ops->vidioc_s_color_space_conv)
+ break;
+
+ ret = ops->vidioc_s_color_space_conv(file, fh, p);
+ break;
+ }
+
+ case VIDIOC_G_COL_SPC_CONV:
+ {
+ struct v4l2_color_space_conversion *p = arg;
+ if (!ops->vidioc_g_color_space_conv)
+ break;
+ ret = ops->vidioc_g_color_space_conv(file, fh, p);
+ break;
+ }
+
default:
{
if (!ops->vidioc_default)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 30acd5265821..249b4346135a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1328,6 +1328,17 @@ EXPORT_SYMBOL(mmc_resume_host);
#endif
+#ifdef CONFIG_TIWLAN_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_embedded_func *funcs)
+{
+ host->embedded_sdio_data.cis = cis;
+ host->embedded_sdio_data.funcs = funcs;
+}
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
static int __init mmc_init(void)
{
int ret;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 06b64085a355..26c5ab3a22f8 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -24,6 +24,10 @@
#include "sdio_ops.h"
#include "sdio_cis.h"
+#ifdef CONFIG_TIWLAN_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
static int sdio_read_fbr(struct sdio_func *func)
{
int ret;
@@ -286,6 +290,12 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto remove;
+#ifdef CONFIG_TIWLAN_SDIO
+ if (host->embedded_sdio_data.cis)
+ memcpy(&card->cis, host->embedded_sdio_data.cis,
+ sizeof(struct sdio_cis));
+ else {
+#endif
/*
* Read the common CIS tuples.
*/
@@ -293,6 +303,10 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto remove;
+#ifdef CONFIG_TIWLAN_SDIO
+ }
+#endif
+
if (oldcard) {
int same = (card->cis.vendor == oldcard->cis.vendor &&
card->cis.device == oldcard->cis.device);
@@ -530,9 +544,29 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
* Initialize (but don't add) all present functions.
*/
for (i = 0; i < funcs; i++, card->sdio_funcs++) {
+#ifdef CONFIG_TIWLAN_SDIO
+ if (host->embedded_sdio_data.funcs) {
+ struct sdio_func *tmp;
+
+ tmp = sdio_alloc_func(host->card);
+ if (IS_ERR(tmp))
+ goto remove;
+ tmp->num = (i + 1);
+ card->sdio_func[i] = tmp;
+ tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+ tmp->max_blksize =
+ host->embedded_sdio_data.funcs[i].f_maxblksize;
+ tmp->vendor = card->cis.vendor;
+ tmp->device = card->cis.device;
+ } else {
+#endif
+
err = sdio_init_func(host->card, i + 1);
if (err)
goto remove;
+#ifdef CONFIG_TIWLAN_SDIO
+ }
+#endif
}
mmc_release_host(host);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 9e060c87e64d..11d8bf15d589 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -20,6 +20,10 @@
#include "sdio_cis.h"
#include "sdio_bus.h"
+#ifdef CONFIG_TIWLAN_SDIO
+#include <linux/mmc/host.h>
+#endif
+
/* show configuration fields */
#define sdio_config_attr(field, format_string) \
static ssize_t \
@@ -199,6 +203,13 @@ static void sdio_release_func(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
+#ifdef CONFIG_TIWLAN_SDIO
+ /*
+ * If this function belongs to an embedded SDIO device we never
+ * allocated CIS tables for it, so don't free them here.
+ */
+ if (!func->card->host->embedded_sdio_data.funcs)
+#endif
sdio_free_func_cis(func);
if (func->info)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 9266e2897047..c175d66914ff 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1111,6 +1111,7 @@ static int mmc_populate_adma_desc_table(struct omap_hsmmc_host *host,
static void omap_hsmmc_start_adma_transfer(struct omap_hsmmc_host *host)
{
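+ /*
+ * Ensure the ADMA descriptor table writes have reached memory
+ * before handing its address to the controller.
+ */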
+ wmb();
OMAP_HSMMC_WRITE(host->base, ADMA_SAL, host->phy_adma_table);
}
@@ -1361,6 +1362,28 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
mmc_host_lazy_disable(host->mmc);
}
+#ifdef CONFIG_TIWLAN_SDIO
+static void omap_hsmmc_status_notify_cb(int card_present, void *dev_id)
+{
+ struct omap_hsmmc_host *host = dev_id;
+ struct omap_mmc_slot_data *slot = &mmc_slot(host);
+ int carddetect;
+
+ printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
+ card_present);
+
+ carddetect = slot->card_detect(slot->card_detect_irq);
+
+ sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
+ if (carddetect) {
+ mmc_detect_change(host->mmc, (HZ * 200) / 1000);
+ } else {
+ omap_hsmmc_reset_controller_fsm(host, SRD);
+ mmc_detect_change(host->mmc, (HZ * 50) / 1000);
+ }
+}
+#endif
+
static int omap_hsmmc_get_cd(struct mmc_host *mmc)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
@@ -1806,6 +1829,15 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
}
dev_dbg(mmc_dev(host->mmc), "DMA Mode=%d\n", host->dma_type);
+#ifdef CONFIG_TIWLAN_SDIO
+ if (pdev->id == CONFIG_TIWLAN_MMC_CONTROLLER-1) {
+ if (pdata->slots[0].embedded_sdio != NULL) {
+ mmc_set_embedded_sdio_data(mmc,
+ &pdata->slots[0].embedded_sdio->cis,
+ pdata->slots[0].embedded_sdio->funcs);
+ }
+ }
+#endif
platform_set_drvdata(pdev, host);
INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
@@ -1958,6 +1990,15 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
}
}
+#ifdef CONFIG_TIWLAN_SDIO
+ else if (mmc_slot(host).register_status_notify) {
+ if (pdev->id == CONFIG_TIWLAN_MMC_CONTROLLER-1) {
+ mmc_slot(host).register_status_notify(
+ omap_hsmmc_status_notify_cb, host);
+ }
+ }
+#endif
+
OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 866526e43a58..fd7b967bbc98 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -49,6 +49,7 @@ struct twlreg_info {
/* chip constraints on regulator behavior */
u16 min_mV;
+ u16 max_mV;
/* used by regulator core */
struct regulator_desc desc;
@@ -318,31 +319,8 @@ static const u16 VIO_VSEL_table[] = {
static const u16 VINTANA2_VSEL_table[] = {
2500, 2750,
};
-static const u16 VAUX1_6030_VSEL_table[] = {
- 1000, 1300, 1800, 2500,
- 2800, 2900, 3000, 3000,
-};
-static const u16 VAUX2_6030_VSEL_table[] = {
- 1200, 1800, 2500, 2750,
- 2800, 2800, 2800, 2800,
-};
-static const u16 VAUX3_6030_VSEL_table[] = {
- 1000, 1200, 1300, 1800,
- 2500, 2800, 3000, 3000,
-};
-static const u16 VMMC_VSEL_table[] = {
- 1200, 1800, 2800, 2900,
- 3000, 3000, 3000, 3000,
-};
-static const u16 VPP_VSEL_table[] = {
- 1800, 1900, 2000, 2100,
- 2200, 2300, 2400, 2500,
-};
-static const u16 VUSIM_VSEL_table[] = {
- 1200, 1800, 2500, 2900,
-};
-static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int mV = info->table[index];
@@ -351,7 +329,7 @@ static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index)
}
static int
-twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel;
@@ -367,21 +345,15 @@ twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
/* REVISIT for VAUX2, first match may not be best/lowest */
/* use the first in-range value */
- if (min_uV <= uV && uV <= max_uV) {
- if (twl_class_is_6030())
- /*
- * Use the below formula to calculate vsel
- * mV = 1000mv + 100mv * (vsel - 1)
- */
- vsel = (LDO_MV(mV) - 1000)/100 + 1;
+ if (min_uV <= uV && uV <= max_uV)
return twlreg_write(info, TWL_MODULE_PM_RECEIVER,
VREG_VOLTAGE, vsel);
- }
}
+
return -EDOM;
}
-static int twlldo_get_voltage(struct regulator_dev *rdev)
+static int twl4030ldo_get_voltage(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
@@ -390,24 +362,71 @@ static int twlldo_get_voltage(struct regulator_dev *rdev)
if (vsel < 0)
return vsel;
- if (twl_class_is_4030()) {
- vsel &= info->table_len - 1;
- return LDO_MV(info->table[vsel]) * 1000;
- } else if (twl_class_is_6030()) {
- /*
- * Use the below formula to calculate vsel
- * mV = 1000mv + 100mv * (vsel - 1)
- */
- return (1000 + (100 * (vsel - 1))) * 1000;
- }
- return -EDOM;
+ vsel &= info->table_len - 1;
+ return LDO_MV(info->table[vsel]) * 1000;
}
-static struct regulator_ops twlldo_ops = {
- .list_voltage = twlldo_list_voltage,
+static struct regulator_ops twl4030ldo_ops = {
+ .list_voltage = twl4030ldo_list_voltage,
- .set_voltage = twlldo_set_voltage,
- .get_voltage = twlldo_get_voltage,
+ .set_voltage = twl4030ldo_set_voltage,
+ .get_voltage = twl4030ldo_get_voltage,
+
+ .enable = twlreg_enable,
+ .disable = twlreg_disable,
+ .is_enabled = twlreg_is_enabled,
+
+ .set_mode = twlreg_set_mode,
+
+ .get_status = twlreg_get_status,
+};
+
+static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ return (info->min_mV + (index * 100)) * 1000;
+}
+
+static int
+twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel;
+
+ if ((min_uV/1000 < info->min_mV) || (max_uV/1000 > info->max_mV))
+ return -EDOM;
+
+ /*
+ * Use the below formula to calculate vsel
+ * mV = 1000mv + 100mv * (vsel - 1)
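+ * e.g. 1800 mV maps to vsel = (1800 - 1000)/100 + 1 = 9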
+ */
+ vsel = (min_uV/1000 - 1000)/100 + 1;
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel);
+
+}
+
+static int twl6030ldo_get_voltage(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE);
+
+ if (vsel < 0)
+ return vsel;
+
+ /*
+ * Use the below formula to calculate vsel
+ * mV = 1000mv + 100mv * (vsel - 1)
+ */
+ return (1000 + (100 * (vsel - 1))) * 1000;
+}
+
+static struct regulator_ops twl6030ldo_ops = {
+ .list_voltage = twl6030ldo_list_voltage,
+
+ .set_voltage = twl6030ldo_set_voltage,
+ .get_voltage = twl6030ldo_get_voltage,
.enable = twlreg_enable,
.disable = twlreg_disable,
@@ -453,24 +472,16 @@ static struct regulator_ops twlfixed_ops = {
/*----------------------------------------------------------------------*/
-#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
- TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
- remap_conf, TWL4030)
#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf, TWL4030)
-#define TWL6030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
- remap_conf) \
- TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
- remap_conf, TWL6030)
#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf, TWL6030)
-#define TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf, \
- family) { \
+#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
.base = offset, \
.id = num, \
.table_len = ARRAY_SIZE(label##_VSEL_table), \
@@ -479,14 +490,32 @@ static struct regulator_ops twlfixed_ops = {
.remap = remap_conf, \
.desc = { \
.name = #label, \
- .id = family##_REG_##label, \
+ .id = TWL4030_REG_##label, \
.n_voltages = ARRAY_SIZE(label##_VSEL_table), \
- .ops = &twlldo_ops, \
+ .ops = &twl4030ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num, \
+ remap_conf) { \
+ .base = offset, \
+ .id = num, \
+ .min_mV = min_mVolts, \
+ .max_mV = max_mVolts, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6030_REG_##label, \
+ .n_voltages = (max_mVolts - min_mVolts)/100, \
+ .ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
+
#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
family) { \
.base = offset, \
@@ -534,12 +563,13 @@ static struct twlreg_info twl_regs[] = {
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
verified since the specification is not public */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 37, 0, 0x21),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 38, 0, 0x21),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 39, 0, 0x21),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 40, 0, 0x21),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 41, 0, 0x21),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 42, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 37, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 38, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 39, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 40, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 41, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 42, 0x21),
+
TWL6030_FIXED_LDO(VANA, 0x50, 2100, 43, 0, 0x21),
TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 44, 0, 0x21),
TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 45, 0, 0x21),
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index ce9112f60c04..1091d6d89c03 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -168,7 +168,7 @@ static int twl_rtc_read(u8 *value, u8 reg, unsigned num_bytes)
int ret = 0, i = 0;
for (i = 0; i < num_bytes; i++)
- if (twl_rtc_read_u8(value + i, (rtc_reg_map[reg + i])))
+ if (twl_rtc_read_u8(value + i, (reg + i)))
return ret;
return ret;
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 9ff47db0b2ce..95055e72d6b2 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1366,6 +1366,33 @@ config SERIAL_OF_PLATFORM
Currently, only 8250 compatible ports are supported, but
others can easily be added.
+config SERIAL_OMAP
+ tristate "OMAP serial port support"
+ depends on ARCH_OMAP3 || ARCH_OMAP4
+ select SERIAL_CORE
+ help
+ If you have a machine based on a Texas Instruments OMAP CPU you
+ can enable its onboard serial ports by enabling this option.
+
+ This option also enables the DMA support in the omap-serial
+ driver; whether DMA is actually used can be selected per port
+ from platform data.
+
+config SERIAL_OMAP_CONSOLE
+ bool "Console on OMAP serial port"
+ depends on SERIAL_OMAP
+ select SERIAL_CORE_CONSOLE
+ help
+ Select this option if you would like to use an OMAP serial port
+ as the system console.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyOx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
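+
+ For example, passing "console=ttyO2,115200n8" on the kernel
+ command line selects the third OMAP UART at 115200 baud.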
+
config SERIAL_OF_PLATFORM_NWPSERIAL
tristate "NWP serial port driver"
depends on PPC_OF && PPC_DCR
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 5548fe7df61d..42962b3a2040 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -82,3 +82,4 @@ obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
+obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
diff --git a/drivers/serial/omap-serial.c b/drivers/serial/omap-serial.c
new file mode 100644
index 000000000000..288a3591db61
--- /dev/null
+++ b/drivers/serial/omap-serial.c
@@ -0,0 +1,1323 @@
+/*
+ * Driver for OMAP-UART controller.
+ * Based on drivers/serial/8250.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Authors:
+ * Govindraj R <govindraj.raja@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Note: This driver is kept separate from the 8250 driver because we
+ * cannot overload the 8250 driver with OMAP platform-specific
+ * configuration. A dedicated driver makes it easier to implement
+ * features such as DMA, hardware flow control and software flow
+ * control as required on the OMAP platform.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/serial_reg.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/serial_core.h>
+
+#include <asm/irq.h>
+#include <plat/dma.h>
+#include <plat/dmtimer.h>
+#include <plat/omap-serial.h>
+
+static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
+
+/* Forward declaration of functions */
+static void uart_tx_dma_callback(int lch, u16 ch_status, void *data);
+static void serial_omap_rx_timeout(unsigned long uart_no);
+static int serial_omap_start_rxdma(struct uart_omap_port *up);
+
+static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
+{
+ offset <<= up->port.regshift;
+ return readw(up->port.membase + offset);
+}
+
+static inline void serial_out(struct uart_omap_port *up, int offset, int value)
+{
+ offset <<= up->port.regshift;
+ writew(value, up->port.membase + offset);
+}
+
+static inline void serial_omap_clear_fifos(struct uart_omap_port *up)
+{
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_out(up, UART_FCR, 0);
+}
+
+/**
+ * serial_omap_get_divisor() - calculate divisor value
+ * @port: uart port info
+ * @baud: baudrate for which divisor needs to be calculated.
+ *
+ * We use our own function to compute the divisor so that we can support
+ * 13x mode; 3 Mbps is the one high baud rate that still uses divisor 16.
+ * Reference OMAP TRM Chapter 17:
+ * Table 17-1. UART Mode Baud Rates, Divisor Values, and Error Rates
+ * referring to oversampling - divisor value
+ * baudrate 460,800 to 3,686,400 all have divisor 13
+ * except 3,000,000 which has divisor value 16
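+ *
+ * For example, assuming the usual 48 MHz UART functional clock:
+ *   115200 baud  -> 16x mode, divisor = 48000000 / (115200 * 16)  = 26
+ *   3686400 baud -> 13x mode, divisor = 48000000 / (3686400 * 13) = 1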
+ */
+static unsigned int
+serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
+{
+ unsigned int divisor;
+
+ if (baud > OMAP_MODE13X_SPEED && baud != 3000000)
+ divisor = 13;
+ else
+ divisor = 16;
+ return port->uartclk/(baud * divisor);
+}
+
+static void serial_omap_stop_rxdma(struct uart_omap_port *up)
+{
+ if (up->uart_dma.rx_dma_used) {
+ del_timer(&up->uart_dma.rx_timer);
+ omap_stop_dma(up->uart_dma.rx_dma_channel);
+ omap_free_dma(up->uart_dma.rx_dma_channel);
+ up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ up->uart_dma.rx_dma_used = false;
+ }
+}
+
+static void serial_omap_enable_ms(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id);
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static void serial_omap_stop_tx(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ if (up->use_dma &&
+ up->uart_dma.tx_dma_channel != OMAP_UART_DMA_CH_FREE) {
+ /*
+ * Check if dma is still active. If yes do nothing,
+ * return. Else stop dma
+ */
+ if (omap_get_dma_active_status(up->uart_dma.tx_dma_channel))
+ return;
+ omap_stop_dma(up->uart_dma.tx_dma_channel);
+ omap_free_dma(up->uart_dma.tx_dma_channel);
+ up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ }
+
+ if (up->ier & UART_IER_THRI) {
+ up->ier &= ~UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static void serial_omap_stop_rx(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ if (up->use_dma)
+ serial_omap_stop_rxdma(up);
+ up->ier &= ~UART_IER_RLSI;
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static inline void receive_chars(struct uart_omap_port *up, int *status)
+{
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned int flag;
+ unsigned char ch = 0, lsr = *status;
+ int max_count = 256;
+
+ do {
+ if (likely(lsr & UART_LSR_DR))
+ ch = serial_in(up, UART_RX);
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
+ /*
+ * For statistics only
+ */
+ if (lsr & UART_LSR_BI) {
+ lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+ up->port.icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ goto ignore_char;
+ } else if (lsr & UART_LSR_PE)
+ up->port.icount.parity++;
+ else if (lsr & UART_LSR_FE)
+ up->port.icount.frame++;
+ if (lsr & UART_LSR_OE)
+ up->port.icount.overrun++;
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ lsr &= up->port.read_status_mask;
+
+#ifdef CONFIG_SERIAL_OMAP_CONSOLE
+ if (up->port.line == up->port.cons->index) {
+ /* Recover the break flag from console xmit */
+ lsr |= up->lsr_break_flag;
+ up->lsr_break_flag = 0;
+ }
+#endif
+ if (lsr & UART_LSR_BI)
+ flag = TTY_BREAK;
+ else if (lsr & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (lsr & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&up->port, ch))
+ goto ignore_char;
+ uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
+ignore_char:
+ lsr = serial_in(up, UART_LSR);
+ } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
+ spin_unlock(&up->port.lock);
+ tty_flip_buffer_push(tty);
+ spin_lock(&up->port.lock);
+}
+
+static void transmit_chars(struct uart_omap_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (up->port.x_char) {
+ serial_out(up, UART_TX, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ serial_omap_stop_tx(&up->port);
+ return;
+ }
+ count = up->port.fifosize / 4;
+ do {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit))
+ serial_omap_stop_tx(&up->port);
+}
+
+static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up)
+{
+ if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static void serial_omap_start_tx(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ struct circ_buf *xmit;
+ unsigned int start;
+ int ret = 0;
+
+ if (!up->use_dma || up->port.x_char) {
+ serial_omap_enable_ier_thri(up);
+ return;
+ }
+
+ xmit = &up->port.state->xmit;
+ if (uart_circ_empty(xmit) || up->uart_dma.tx_dma_used)
+ return;
+
+ if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE)
+ ret = omap_request_dma(up->uart_dma.uart_dma_tx,
+ "UART Tx DMA",
+ (void *)uart_tx_dma_callback, up,
+ &(up->uart_dma.tx_dma_channel));
+
+ if (ret < 0) {
+ serial_omap_enable_ier_thri(up);
+ return;
+ }
+
+ start = up->uart_dma.tx_buf_dma_phys +
+ (xmit->tail & (UART_XMIT_SIZE - 1));
+ spin_lock(&(up->uart_dma.tx_lock));
+ up->uart_dma.tx_dma_used = true;
+ spin_unlock(&(up->uart_dma.tx_lock));
+
+ up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
+ /*
+ * The xmit buffer is circular. If the pending data wraps around the
+ * end of the buffer, it has to be sent in two separate DMA
+ * transfers.
+ */
+ if (start + up->uart_dma.tx_buf_size >=
+ up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
+ up->uart_dma.tx_buf_size =
+ (up->uart_dma.tx_buf_dma_phys +
+ UART_XMIT_SIZE) - start;
+
+ omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ up->uart_dma.uart_base, 0, 0);
+ omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_POST_INC, start, 0, 0);
+ omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
+ OMAP_DMA_DATA_TYPE_S8,
+ up->uart_dma.tx_buf_size, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ up->uart_dma.uart_dma_tx, 0);
+ wmb();
+ omap_start_dma(up->uart_dma.tx_dma_channel);
+}
+
+static unsigned int check_modem_status(struct uart_omap_port *up)
+{
+ int status;
+ status = serial_in(up, UART_MSR);
+
+ status |= up->msr_saved_flags;
+ up->msr_saved_flags = 0;
+
+ if ((status & UART_MSR_ANY_DELTA) == 0)
+ return status;
+ if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
+ up->port.state != NULL) {
+ if (status & UART_MSR_TERI)
+ up->port.icount.rng++;
+ if (status & UART_MSR_DDSR)
+ up->port.icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ uart_handle_dcd_change
+ (&up->port, status & UART_MSR_DCD);
+ if (status & UART_MSR_DCTS)
+ uart_handle_cts_change
+ (&up->port, status & UART_MSR_CTS);
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+ }
+
+ return status;
+}
+
+/**
+ * serial_omap_irq() - This handles the interrupt from one port
+ * @irq: uart port irq number
+ * @dev_id: uart port info
+ */
+static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
+{
+ struct uart_omap_port *up = dev_id;
+ unsigned int iir, lsr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ iir = serial_in(up, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return IRQ_NONE;
+ }
+
+ lsr = serial_in(up, UART_LSR);
+ if (iir & UART_IER_RLSI) {
+ if (up->use_dma)
+ up->ier &= ~UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+ if (!up->use_dma ||
+ serial_omap_start_rxdma(up) != 0)
+ if (lsr & UART_LSR_DR)
+ receive_chars(up, &lsr);
+ }
+
+ check_modem_status(up);
+ if ((lsr & UART_LSR_THRE) && (iir & UART_IIR_THRI))
+ transmit_chars(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ up->port_activity = jiffies;
+ return IRQ_HANDLED;
+}
+
+static unsigned int serial_omap_tx_empty(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+ unsigned int ret = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id);
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return ret;
+}
+
+static unsigned int serial_omap_get_mctrl(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char status;
+ unsigned int ret = 0;
+
+ status = check_modem_status(up);
+ dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id);
+
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char mcr = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->pdev->id);
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ mcr |= up->mcr;
+ serial_out(up, UART_MCR, mcr);
+}
+
+static void serial_omap_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id);
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static int serial_omap_startup(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+ int retval;
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(up->port.irq, serial_omap_irq, up->port.irqflags,
+ up->name, up);
+ if (retval)
+ return retval;
+
+ dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id);
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+ */
+ serial_omap_clear_fifos(up);
+ /* For Hardware flow control */
+ serial_out(up, UART_MCR, UART_MCR_RTS);
+
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) serial_in(up, UART_LSR);
+ if (serial_in(up, UART_LSR) & UART_LSR_DR)
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
+
+ /*
+ * Now, initialize the UART
+ */
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
+ spin_lock_irqsave(&up->port.lock, flags);
+ /*
+ * Most PC uarts need OUT2 raised to enable interrupts.
+ */
+ up->port.mctrl |= TIOCM_OUT2;
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ up->msr_saved_flags = 0;
+ if (up->use_dma) {
+ free_page((unsigned long)up->port.state->xmit.buf);
+ up->port.state->xmit.buf = dma_alloc_coherent(NULL,
+ UART_XMIT_SIZE,
+ (dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys),
+ 0);
+ init_timer(&(up->uart_dma.rx_timer));
+ up->uart_dma.rx_timer.function = serial_omap_rx_timeout;
+ up->uart_dma.rx_timer.data = up->pdev->id;
+ /* Currently the buffer size is 4KB. Can increase it */
+ up->uart_dma.rx_buf = dma_alloc_coherent(NULL,
+ up->uart_dma.rx_buf_size,
+ (dma_addr_t *)&(up->uart_dma.rx_buf_dma_phys), 0);
+ }
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+ * are set via set_termios(), which will be occurring imminently
+ * anyway, so we don't enable them here.
+ */
+ up->ier = UART_IER_RLSI | UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+
+ up->port_activity = jiffies;
+ return 0;
+}
+
+static void serial_omap_shutdown(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id);
+ /*
+ * Disable interrupts from this port
+ */
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ up->port.mctrl &= ~TIOCM_OUT2;
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * Disable break condition and FIFOs
+ */
+ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+ serial_omap_clear_fifos(up);
+
+ /*
+ * Read data port to reset things, and then free the irq
+ */
+ if (serial_in(up, UART_LSR) & UART_LSR_DR)
+ (void) serial_in(up, UART_RX);
+ if (up->use_dma) {
+ int tmp;
+ dma_free_coherent(up->port.dev,
+ UART_XMIT_SIZE, up->port.state->xmit.buf,
+ up->uart_dma.tx_buf_dma_phys);
+ up->port.state->xmit.buf = NULL;
+ serial_omap_stop_rx(port);
+ dma_free_coherent(up->port.dev,
+ up->uart_dma.rx_buf_size, up->uart_dma.rx_buf,
+ up->uart_dma.rx_buf_dma_phys);
+ up->uart_dma.rx_buf = NULL;
+ tmp = serial_in(up, UART_OMAP_SYSC) & OMAP_UART_SYSC_RESET;
+ serial_out(up, UART_OMAP_SYSC, tmp); /* force-idle */
+ }
+ free_irq(up->port.irq, up);
+}
+
+static inline void
+serial_omap_configure_xonxoff
+ (struct uart_omap_port *up, struct ktermios *termios)
+{
+ unsigned char efr = 0;
+
+ up->lcr = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr & ~UART_EFR_ECB);
+
+ serial_out(up, UART_XON1, termios->c_cc[VSTART]);
+ serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
+
+ /* clear SW control mode bits */
+ efr = up->efr;
+ efr &= OMAP_UART_SW_CLR;
+
+ /*
+ * IXON Flag:
+ * Enable XON/XOFF flow control on output.
+ * Transmit XON1, XOFF1
+ */
+ if (termios->c_iflag & IXON)
+ efr |= OMAP_UART_SW_TX;
+
+ /*
+ * IXOFF Flag:
+ * Enable XON/XOFF flow control on input.
+ * Receiver compares XON1, XOFF1.
+ */
+ if (termios->c_iflag & IXOFF)
+ efr |= OMAP_UART_SW_RX;
+
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+
+ up->mcr = serial_in(up, UART_MCR);
+
+ /*
+ * IXANY Flag:
+ * Enable any character to restart output.
+ * Operation resumes after receiving any
+ * character after recognition of the XOFF character
+ */
+ if (termios->c_iflag & IXANY)
+ up->mcr |= UART_MCR_XONANY;
+
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+ serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
+ /* Enable special char function UARTi.EFR_REG[5] and
+ * load the new software flow control mode IXON or IXOFF
+ * and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
+ */
+ serial_out(up, UART_EFR, efr | UART_EFR_SCD);
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+
+ serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
+ serial_out(up, UART_LCR, up->lcr);
+}
+
+static void
+serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char cval = 0;
+ unsigned char efr = 0;
+ unsigned long flags = 0;
+ unsigned int baud, quot;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+
+ /*
+ * Ask the core to calculate the baud rate; the divisor itself comes
+ * from serial_omap_get_divisor() so that 13x mode is handled.
+ */
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
+ quot = serial_omap_get_divisor(port, baud);
+
+ up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
+ UART_FCR_ENABLE_FIFO;
+ if (up->use_dma)
+ up->fcr |= UART_FCR_DMA_SELECT;
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+ * Characters to ignore
+ */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * Modem status interrupts
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_LCR, cval); /* reset DLAB */
+
+ /* FIFOs and DMA Settings */
+
+ /* FCR can only be changed while the baud clock is not running,
+ * i.e. while DLL_REG and DLH_REG are set to 0.
+ */
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+ serial_out(up, UART_DLL, 0);
+ serial_out(up, UART_DLM, 0);
+ serial_out(up, UART_LCR, 0);
+
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+
+ serial_out(up, UART_LCR, 0);
+ up->mcr = serial_in(up, UART_MCR);
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
+ /* FIFO ENABLE, DMA MODE */
+ serial_out(up, UART_FCR, up->fcr);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+
+ if (up->use_dma) {
+ serial_out(up, UART_TI752_TLR, 0);
+ serial_out(up, UART_OMAP_SCR,
+ (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
+ }
+
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+ serial_out(up, UART_MCR, up->mcr);
+
+ /* Protocol, Baud Rate, and Interrupt Settings */
+
+ serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_DISABLE);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_IER, 0);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+
+ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
+ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
+
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, cval);
+
+ if (baud > 230400 && baud != 3000000)
+ serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_MODE13X);
+ else
+ serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_MODE16X);
+
+ /* Hardware Flow Control Configuration */
+
+ if (termios->c_cflag & CRTSCTS) {
+ efr |= (UART_EFR_CTS | UART_EFR_RTS);
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+
+ up->mcr = serial_in(up, UART_MCR);
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
+
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+
+ serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
+ serial_out(up, UART_EFR, efr); /* Enable AUTORTS and AUTOCTS */
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_RTS);
+ serial_out(up, UART_LCR, cval);
+ }
+
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+ /* Software Flow Control Configuration */
+ if (termios->c_iflag & (IXON | IXOFF))
+ serial_omap_configure_xonxoff(up, termios);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
+}
+
+static void
+serial_omap_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char efr;
+
+ dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
+ efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+ serial_out(up, UART_EFR, efr | UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0);
+
+ serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
+ serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+ serial_out(up, UART_EFR, efr);
+ serial_out(up, UART_LCR, 0);
+ /* Enable module level wake up */
+ serial_out(up, UART_OMAP_WER,
+ (state != 0) ? OMAP_UART_WER_MOD_WKUP : 0);
+}
+
+static void serial_omap_release_port(struct uart_port *port)
+{
+ dev_dbg(port->dev, "serial_omap_release_port+\n");
+}
+
+static int serial_omap_request_port(struct uart_port *port)
+{
+ dev_dbg(port->dev, "serial_omap_request_port+\n");
+ return 0;
+}
+
+static void serial_omap_config_port(struct uart_port *port, int flags)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
+ up->pdev->id);
+ up->port.type = PORT_OMAP;
+}
+
+static int
+serial_omap_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ /* we don't want the core code to modify any port params */
+ dev_dbg(port->dev, "serial_omap_verify_port+\n");
+ return -EINVAL;
+}
+
+static const char *
+serial_omap_type(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->pdev->id);
+ return up->name;
+}
+
+#ifdef CONFIG_SERIAL_OMAP_CONSOLE
+
+static struct uart_omap_port *serial_omap_console_ports[4];
+
+static struct uart_driver serial_omap_reg;
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static inline void wait_for_xmitr(struct uart_omap_port *up)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ do {
+ status = serial_in(up, UART_LSR);
+
+ if (status & UART_LSR_BI)
+ up->lsr_break_flag = UART_LSR_BI;
+
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ for (tmout = 1000000; tmout; tmout--) {
+ unsigned int msr = serial_in(up, UART_MSR);
+ up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
+ if (msr & UART_MSR_CTS)
+ break;
+ udelay(1);
+ }
+ }
+}
+
+static void serial_omap_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ wait_for_xmitr(up);
+ serial_out(up, UART_TX, ch);
+}
+
+static void
+serial_omap_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_omap_port *up = serial_omap_console_ports[co->index];
+ unsigned int ier;
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0);
+
+ uart_console_write(&up->port, s, count, serial_omap_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ serial_out(up, UART_IER, ier);
+ /*
+ * Receive handling will still happen correctly because the receive
+ * ready bit remains set; it is not cleared on read. Modem status
+ * changes, however, are not re-raised, so check them here if any
+ * flags were saved while we were processing with interrupts off.
+ */
+ if (up->msr_saved_flags)
+ check_modem_status(up);
+}
+
+static int __init
+serial_omap_console_setup(struct console *co, char *options)
+{
+ struct uart_omap_port *up;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int r;
+
+ if (serial_omap_console_ports[co->index] == NULL)
+ return -ENODEV;
+ up = serial_omap_console_ports[co->index];
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ r = uart_set_options(&up->port, co, baud, parity, bits, flow);
+
+ return r;
+}
+
+static struct console serial_omap_console = {
+ .name = OMAP_SERIAL_NAME,
+ .write = serial_omap_console_write,
+ .device = uart_console_device,
+ .setup = serial_omap_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &serial_omap_reg,
+};
+
+static void serial_omap_add_console_port(struct uart_omap_port *up)
+{
+ serial_omap_console_ports[up->pdev->id] = up;
+}
+
+#define OMAP_CONSOLE (&serial_omap_console)
+
+#else
+
+#define OMAP_CONSOLE NULL
+
+static inline void serial_omap_add_console_port(struct uart_omap_port *up)
+{}
+
+#endif
+
+struct uart_ops serial_omap_pops = {
+ .tx_empty = serial_omap_tx_empty,
+ .set_mctrl = serial_omap_set_mctrl,
+ .get_mctrl = serial_omap_get_mctrl,
+ .stop_tx = serial_omap_stop_tx,
+ .start_tx = serial_omap_start_tx,
+ .stop_rx = serial_omap_stop_rx,
+ .enable_ms = serial_omap_enable_ms,
+ .break_ctl = serial_omap_break_ctl,
+ .startup = serial_omap_startup,
+ .shutdown = serial_omap_shutdown,
+ .set_termios = serial_omap_set_termios,
+ .pm = serial_omap_pm,
+ .type = serial_omap_type,
+ .release_port = serial_omap_release_port,
+ .request_port = serial_omap_request_port,
+ .config_port = serial_omap_config_port,
+ .verify_port = serial_omap_verify_port,
+};
+
+static struct uart_driver serial_omap_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "OMAP-SERIAL",
+ .dev_name = OMAP_SERIAL_NAME,
+ .nr = OMAP_MAX_HSUART_PORTS,
+ .cons = OMAP_CONSOLE,
+};
+
+static int
+serial_omap_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct uart_omap_port *up = platform_get_drvdata(pdev);
+
+ if (up)
+ uart_suspend_port(&serial_omap_reg, &up->port);
+ return 0;
+}
+
+static int serial_omap_resume(struct platform_device *dev)
+{
+ struct uart_omap_port *up = platform_get_drvdata(dev);
+
+ if (up)
+ uart_resume_port(&serial_omap_reg, &up->port);
+ return 0;
+}
+
+static void serial_omap_rx_timeout(unsigned long uart_no)
+{
+ struct uart_omap_port *up = ui[uart_no];
+ unsigned int curr_dma_pos, curr_transmitted_size;
+ int ret = 0;
+
+ curr_dma_pos = omap_get_dma_dst_pos(up->uart_dma.rx_dma_channel);
+ if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
+ (curr_dma_pos == 0)) {
+ if (jiffies_to_msecs(jiffies - up->port_activity) <
+ RX_TIMEOUT) {
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+ usecs_to_jiffies(up->uart_dma.rx_timeout));
+ } else {
+ serial_omap_stop_rxdma(up);
+ up->ier |= UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+ }
+ return;
+ }
+
+ curr_transmitted_size = curr_dma_pos -
+ up->uart_dma.prev_rx_dma_pos;
+ up->port.icount.rx += curr_transmitted_size;
+ tty_insert_flip_string(up->port.state->port.tty,
+ up->uart_dma.rx_buf +
+ (up->uart_dma.prev_rx_dma_pos -
+ up->uart_dma.rx_buf_dma_phys),
+ curr_transmitted_size);
+ tty_flip_buffer_push(up->port.state->port.tty);
+ up->uart_dma.prev_rx_dma_pos = curr_dma_pos;
+ if (up->uart_dma.rx_buf_size +
+ up->uart_dma.rx_buf_dma_phys == curr_dma_pos) {
+ ret = serial_omap_start_rxdma(up);
+ if (ret < 0) {
+ serial_omap_stop_rxdma(up);
+ up->ier |= UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+ }
+ } else {
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+ usecs_to_jiffies(up->uart_dma.rx_timeout));
+ }
+ up->port_activity = jiffies;
+}
+
+static void uart_rx_dma_callback(int lch, u16 ch_status, void *data)
+{
+ return;
+}
+
+static int serial_omap_start_rxdma(struct uart_omap_port *up)
+{
+ int ret = 0;
+
+ if (up->uart_dma.rx_dma_channel == -1) {
+ ret = omap_request_dma(up->uart_dma.uart_dma_rx,
+ "UART Rx DMA",
+ (void *)uart_rx_dma_callback, up,
+ &(up->uart_dma.rx_dma_channel));
+ if (ret < 0)
+ return ret;
+
+ omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ up->uart_dma.uart_base, 0, 0);
+ omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
+ OMAP_DMA_AMODE_POST_INC,
+ up->uart_dma.rx_buf_dma_phys, 0, 0);
+ omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
+ OMAP_DMA_DATA_TYPE_S8,
+ up->uart_dma.rx_buf_size, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ up->uart_dma.uart_dma_rx, 0);
+ }
+ up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
+ /*
+ * TBD: This should be done in the omap_start_dma function.
+ * A patch proposing that is pending on the linux-omap (LO) list.
+ */
+ if (cpu_is_omap44xx())
+ omap_writel(0, OMAP44XX_DMA4_BASE
+ + OMAP_DMA4_CDAC(up->uart_dma.rx_dma_channel));
+ else
+ omap_writel(0, OMAP34XX_DMA4_BASE
+ + OMAP_DMA4_CDAC(up->uart_dma.rx_dma_channel));
+ omap_start_dma(up->uart_dma.rx_dma_channel);
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+ usecs_to_jiffies(up->uart_dma.rx_timeout));
+ up->uart_dma.rx_dma_used = true;
+ return ret;
+}
+
+static void serial_omap_continue_tx(struct uart_omap_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int start = up->uart_dma.tx_buf_dma_phys
+ + (xmit->tail & (UART_XMIT_SIZE - 1));
+
+ if (uart_circ_empty(xmit))
+ return;
+
+ up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
+ /*
+ * It is a circular buffer. See if the buffer has wrapped around;
+ * if so, the data has to be sent in two separate DMA transfers
+ * (a standalone sketch of the split follows this function).
+ */
+ if (start + up->uart_dma.tx_buf_size >=
+ up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
+ up->uart_dma.tx_buf_size =
+ (up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE) - start;
+ omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ up->uart_dma.uart_base, 0, 0);
+ omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_POST_INC, start, 0, 0);
+ omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
+ OMAP_DMA_DATA_TYPE_S8,
+ up->uart_dma.tx_buf_size, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ up->uart_dma.uart_dma_tx, 0);
+ wmb();
+ omap_start_dma(up->uart_dma.tx_dma_channel);
+}
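
A minimal standalone sketch of that wrap-around split, assuming the usual power-of-two UART_XMIT_SIZE (4096, as in include/linux/serial_core.h); the function and parameter names here are illustrative only, not part of the driver:

#include <stddef.h>

#define UART_XMIT_SIZE 4096 /* power of two, as in serial_core.h */

/*
 * Length of the first DMA pass: the distance from the tail to the end
 * of the ring, capped by the number of pending bytes. The completion
 * callback issues a second transfer for whatever is left after the wrap.
 */
static size_t first_tx_chunk(size_t tail, size_t pending)
{
	size_t to_end = UART_XMIT_SIZE - (tail & (UART_XMIT_SIZE - 1));

	return pending < to_end ? pending : to_end;
}
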
+
+static void uart_tx_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)data;
+ struct circ_buf *xmit = &up->port.state->xmit;
+
+ xmit->tail = (xmit->tail + up->uart_dma.tx_buf_size) & \
+ (UART_XMIT_SIZE - 1);
+ up->port.icount.tx += up->uart_dma.tx_buf_size;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit)) {
+ spin_lock(&(up->uart_dma.tx_lock));
+ serial_omap_stop_tx(&up->port);
+ up->uart_dma.tx_dma_used = false;
+ spin_unlock(&(up->uart_dma.tx_lock));
+ } else {
+ omap_stop_dma(up->uart_dma.tx_dma_channel);
+ serial_omap_continue_tx(up);
+ }
+ up->port_activity = jiffies;
+ return;
+}
+
+static int serial_omap_probe(struct platform_device *pdev)
+{
+ struct uart_omap_port *up;
+ struct resource *mem, *irq, *dma_tx, *dma_rx;
+ struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
+ int ret = -ENOSPC;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -ENODEV;
+ }
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(mem->start, (mem->end - mem->start) + 1,
+ pdev->dev.driver->name)) {
+ dev_err(&pdev->dev, "memory region already claimed\n");
+ return -EBUSY;
+ }
+
+ dma_rx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!dma_rx) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dma_tx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!dma_tx) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ up = kzalloc(sizeof(*up), GFP_KERNEL);
+ if (up == NULL) {
+ ret = -ENOMEM;
+ goto do_release_region;
+ }
+ sprintf(up->name, "OMAP UART%d", pdev->id);
+ up->pdev = pdev;
+ up->port.dev = &pdev->dev;
+ up->port.type = PORT_OMAP;
+ up->port.iotype = UPIO_MEM;
+ up->port.irq = irq->start;
+
+ up->port.regshift = 2;
+ up->port.fifosize = 64;
+ up->port.ops = &serial_omap_pops;
+ up->port.line = pdev->id;
+
+ up->port.membase = omap_up_info->membase;
+ up->port.mapbase = omap_up_info->mapbase;
+ up->port.flags = omap_up_info->flags;
+ up->port.irqflags = omap_up_info->irqflags;
+ up->port.uartclk = omap_up_info->uartclk;
+ up->uart_dma.uart_base = mem->start;
+
+ if (omap_up_info->dma_enabled) {
+ up->uart_dma.uart_dma_tx = dma_tx->start;
+ up->uart_dma.uart_dma_rx = dma_rx->start;
+ up->use_dma = 1;
+ up->uart_dma.rx_buf_size = 4096;
+ up->uart_dma.rx_timeout = 1;
+ spin_lock_init(&(up->uart_dma.tx_lock));
+ spin_lock_init(&(up->uart_dma.rx_lock));
+ up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ }
+
+ ui[pdev->id] = up;
+ serial_omap_add_console_port(up);
+
+ ret = uart_add_one_port(&serial_omap_reg, &up->port);
+ if (ret != 0)
+ goto do_release_region;
+
+ platform_set_drvdata(pdev, up);
+ return 0;
+err:
+ dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
+ pdev->id, __func__, ret);
+do_release_region:
+ release_mem_region(mem->start, (mem->end - mem->start) + 1);
+ return ret;
+}
+
+static int serial_omap_remove(struct platform_device *dev)
+{
+ struct uart_omap_port *up = platform_get_drvdata(dev);
+
+ platform_set_drvdata(dev, NULL);
+ if (up) {
+ uart_remove_one_port(&serial_omap_reg, &up->port);
+ kfree(up);
+ }
+ return 0;
+}
+
+static struct platform_driver serial_omap_driver = {
+ .probe = serial_omap_probe,
+ .remove = serial_omap_remove,
+
+ .suspend = serial_omap_suspend,
+ .resume = serial_omap_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+int __init serial_omap_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&serial_omap_reg);
+ if (ret != 0)
+ return ret;
+ ret = platform_driver_register(&serial_omap_driver);
+ if (ret != 0)
+ uart_unregister_driver(&serial_omap_reg);
+ return ret;
+}
+
+void __exit serial_omap_exit(void)
+{
+ platform_driver_unregister(&serial_omap_driver);
+ uart_unregister_driver(&serial_omap_reg);
+}
+
+module_init(serial_omap_init);
+module_exit(serial_omap_exit);
+
+MODULE_DESCRIPTION("OMAP High Speed UART driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 0495fa651225..e178cf81c109 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1274,6 +1274,10 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (is_root_hub(urb->dev))
return 0;
+ if (usb_endpoint_xfer_control(&urb->ep->desc))
+ urb->transfer_flags = URB_NO_SETUP_DMA_MAP |
+ URB_NO_TRANSFER_DMA_MAP;
+
if (usb_endpoint_xfer_control(&urb->ep->desc)
&& !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
if (hcd->self.uses_dma) {
diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig
index d877c361abda..83862a523943 100644
--- a/drivers/video/omap2/Kconfig
+++ b/drivers/video/omap2/Kconfig
@@ -1,9 +1,6 @@
config OMAP2_VRAM
bool
-config OMAP2_VRFB
- bool
-
source "drivers/video/omap2/dss/Kconfig"
source "drivers/video/omap2/omapfb/Kconfig"
source "drivers/video/omap2/displays/Kconfig"
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index b12a59c9c50a..42acbdc19699 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -13,10 +13,21 @@ config PANEL_SHARP_LS037V7DW01
help
LCD Panel used in TI's SDP3430 and EVM boards
+config PANEL_PICO_DLP
+ tristate "OMAP PICO DLP Panel"
+ depends on OMAP2_DSS
+ help
+ DLP pico projector panel used on TI's SDP4430 and EVM boards
+
config PANEL_TAAL
tristate "Taal DSI Panel"
depends on OMAP2_DSS_DSI
help
Taal DSI command mode panel from TPO.
+config LCD_4430SDP
+ tristate "OMAP4 LCD Panel"
+ help
+ LCD Panel used in OMAP4
+
endmenu
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index 955646440b3a..a2774241416e 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -2,3 +2,5 @@ obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o
obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
+obj-$(CONFIG_LCD_4430SDP) += lcd_4430sdp.o
+obj-$(CONFIG_PANEL_PICO_DLP) += panel-picodlp.o
diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c
new file mode 100644
index 000000000000..1e60881bd261
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-picodlp.c
@@ -0,0 +1,514 @@
+/*
+ * panel-picodlp.c
+ * pico DLP driver
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Author: mythripk <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/input.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <plat/display.h>
+#include <../drivers/video/omap2/dss/dss.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include "panel-picodlp.h"
+
+
+#define DRIVER_NAME "pico_i2c"
+/* How much data we can put into single write block */
+#define MAX_I2C_WRITE_BLOCK_SIZE 32
+#define PICO_MAJOR 1 /* 2 bits */
+#define PICO_MINOR 1 /* 2 bits */
+#define DSI_DIV2 (0x40C)
+#define DSI_DIV_LCD (16)
+#define DSI_DIV_PCD (0)
+#define DSI_CONTROL2 (0x238)
+
+static int display_control_reg = (0x58000000 + 0x1000);
+extern void __iomem *dispc_base;
+
+static struct omap_video_timings pico_ls_timings = {
+ .x_res = 864,
+ .y_res = 480,
+ .hsw = 7,
+ .hfp = 11,
+ .hbp = 7,
+
+ .vsw = 2,
+ .vfp = 3,
+ .vbp = 14,
+};
+
+struct pico {
+ struct i2c_client *client;
+ struct mutex xfer_lock;
+} *sd;
+
+
+static int dlp_read_block(int reg, u8 *data, int len);
+static int pico_i2c_write(int reg, u32 value);
+
+static int dlp_write_block(int reg, const u8 *data, int len)
+{
+ unsigned char wb[MAX_I2C_WRITE_BLOCK_SIZE + 1];
+ struct i2c_msg msg;
+ int r;
+ int i;
+
+ if (len < 1 || len > MAX_I2C_WRITE_BLOCK_SIZE) {
+ dev_info(&sd->client->dev,
+ "dlp_write_block: invalid len %d\n", len);
+ return -EIO;
+ }
+
+ wb[0] = reg & 0xff;
+
+ for (i = 0; i < len; i++)
+ wb[i + 1] = data[i];
+
+ mutex_lock(&sd->xfer_lock);
+
+ msg.addr = sd->client->addr;
+ msg.flags = 0;
+ msg.len = len + 1;
+ msg.buf = wb;
+
+ r = i2c_transfer(sd->client->adapter, &msg, 1);
+ mutex_unlock(&sd->xfer_lock);
+
+ if (r == 1) {
+ for (i = 0; i < len; i++)
+ dev_info(&sd->client->dev,
+ "addr %x bw 0x%02x[%d]: 0x%02x\n",
+ sd->client->addr, reg + i, i, data[i]);
+ }
+
+
+ if (r == 1)
+ return 0;
+
+ return r;
+}
+
+static int pico_i2c_write(int reg, u32 value)
+{
+ u8 data[4];
+
+ data[0] = (value & 0xFF000000) >> 24;
+ data[1] = (value & 0x00FF0000) >> 16;
+ data[2] = (value & 0x0000FF00) >> 8;
+ data[3] = (value & 0x000000FF);
+
+ return dlp_write_block(reg, data, 4);
+}
+
+static int dlp_read_block(int reg, u8 *data, int len)
+{
+ unsigned char wb[2];
+ struct i2c_msg msg[2];
+ int r;
+ mutex_lock(&sd->xfer_lock);
+ wb[0] = 0x15 & 0xff;
+ wb[1] = reg & 0xff;
+ msg[0].addr = sd->client->addr;
+ msg[0].len = 2;
+ msg[0].flags = 0;
+ msg[0].buf = wb;
+ msg[1].addr = sd->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = len;
+ msg[1].buf = data;
+
+ r = i2c_transfer(sd->client->adapter, msg, 2);
+ mutex_unlock(&sd->xfer_lock);
+
+
+ if (r == 2) {
+ int i;
+
+ for (i = 0; i < len; i++)
+ dev_info(&sd->client->dev,
+ "addr %x br 0x%02x[%d]: 0x%02x\n",
+ sd->client->addr, reg + i, i, data[i]);
+ }
+
+
+ if (r == 2)
+ return len;
+
+ return r;
+}
+
+
+static int pico_i2c_read(int reg)
+{
+ int r;
+ u8 data[4] = { 0, 0, 0, 0 };
+
+ r = dlp_read_block(reg, data, 4);
+ return (int)data[3] | ((int)(data[2]) << 8) | ((int)(data[1]) << 16) | ((int)(data[0]) << 24);
+}
+
+/*
+ * Configure a DMA transfer from serial flash to a DPP2600 look-up table
+ * @param flash_address - I - start address of the data in flash
+ * @param flash_num_bytes - I - number of bytes to read from flash
+ * @param CMT_SEQz - I - select mailbox to load data to: 0=sequence/DRC, 1=CMT/splash
+ * @param table_number - I - look-up table to load the data into
+ * @return 0 - no errors
+ * 1 - invalid flash address specified
+ * 2 - invalid mailbox specified
+ * 3 - invalid table_number / mailbox combination
+ * (A usage sketch follows the function below.)
+ */
+int dpp2600_flash_dma(int flash_address, int flash_num_bytes, int CMT_SEQz, int table_number)
+
+{
+ int mailbox_address, mailbox_select;
+
+ /* check argument validity */
+ if (flash_address > 0x1fffff)
+ return 1;
+ if (CMT_SEQz > 1)
+ return 2;
+ if ((CMT_SEQz == 0 && table_number > 6) ||
+ (CMT_SEQz == 1 && table_number > 5))
+ return 3;
+ /* set mailbox parameters */
+ if (CMT_SEQz) {
+ mailbox_address = CMT_SPLASH_LUT_START_ADDR;
+ mailbox_select = CMT_SPLASH_LUT_DEST_SELECT;
+ } else {
+ mailbox_address = SEQ_RESET_LUT_START_ADDR;
+ mailbox_select = SEQ_RESET_LUT_DEST_SELECT;
+ }
+
+ /* configure DMA from flash to LUT */
+ pico_i2c_write(PBC_CONTROL, 0);
+ pico_i2c_write(FLASH_START_ADDR, flash_address);
+ pico_i2c_write(FLASH_READ_BYTES, flash_num_bytes);
+ pico_i2c_write(mailbox_address, 0);
+ pico_i2c_write(mailbox_select, table_number);
+ /* transfer control to flash controller */
+ pico_i2c_write(PBC_CONTROL, 1);
+ mdelay(1000);
+ /* return register access to I2c */
+ pico_i2c_write(PBC_CONTROL, 0);
+ /* close LUT access */
+ pico_i2c_write(mailbox_select, 0);
+ return 0;
+}
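
A usage sketch grounded in the initialization sequence further down in this file: loading the all-colour CMT look-up table from flash through the CMT/splash mailbox. The wrapper name is illustrative only; the constants come from panel-picodlp.h.

/*
 * Illustrative wrapper only (not part of the driver): load the
 * all-colour CMT look-up table from flash through the CMT/splash
 * mailbox, exactly as pico_i2c_initialize() does further down.
 */
static int pico_load_cmt_lut(void)
{
	/* CMT_SEQz = 1 selects the CMT/splash mailbox */
	return dpp2600_flash_dma(CMT_LUT_0_START_ADDR, CMT_LUT_0_SIZE,
				 1, CMT_LUT_ALL);
}
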
+
+/* Configure datapath for parallel RGB operation */
+static void dpp2600_config_rgb(void)
+{
+ /* enable video board output drivers */
+ pico_i2c_write(SEQ_CONTROL, 0);
+ pico_i2c_write(ACTGEN_CONTROL, 0x10);
+ pico_i2c_write(SEQUENCE_MODE, SEQ_LOCK);
+ pico_i2c_write(DATA_FORMAT, RGB888);
+ pico_i2c_write(INPUT_RESOLUTION, WVGA_864_LANDSCAPE);
+ pico_i2c_write(INPUT_SOURCE, PARALLEL_RGB);
+ pico_i2c_write(CPU_IF_SYNC_METHOD, 1);
+ /* turn image back on */
+ pico_i2c_write(SEQ_CONTROL, 1);
+}
+
+/*
+ * Configure datapath for splash image operation
+ * @param image_number - I - splash image to load from flash
+ * @return 0 - no errors
+ * 1 - invalid image_number specified
+ */
+int dpp2600_config_splash(int image_number)
+{
+ int address, size, resolution;
+ printk("dpp2600 config splash");
+ resolution = QWVGA_LANDSCAPE;
+ switch (image_number) {
+ case 0:
+ address = SPLASH_0_START_ADDR;
+ size = SPLASH_0_SIZE;
+ break;
+ case 1:
+ address = SPLASH_1_START_ADDR;
+ size = SPLASH_1_SIZE;
+ break;
+ case 2:
+ address = SPLASH_2_START_ADDR;
+ size = SPLASH_2_SIZE;
+ break;
+ case 3:
+ address = SPLASH_3_START_ADDR;
+ size = SPLASH_3_SIZE;
+ break;
+ case 4:
+ address = OPT_SPLASH_0_START_ADDR;
+ size = OPT_SPLASH_0_SIZE;
+ resolution = WVGA_DMD_OPTICAL_TEST;
+ break;
+ default:
+ return 1;
+ };
+ /* configure sequence, data format and resolution */
+ pico_i2c_write(SEQ_CONTROL, 0);
+ pico_i2c_write(SEQUENCE_MODE, SEQ_FREE_RUN);
+ pico_i2c_write(DATA_FORMAT, RGB565);
+ pico_i2c_write(INPUT_RESOLUTION, resolution);
+ pico_i2c_write(INPUT_SOURCE, SPLASH_SCREEN);
+ dpp2600_flash_dma(address, size, 1, SPLASH_LUT);
+ /* turn image back on */
+ pico_i2c_write(SEQ_CONTROL, 1);
+ return 0;
+}
+
+/*
+ * Modify contents of a 32-bit register (read-modify-write)
+ * @param Addr Register address
+ * @param ClearMask Any bits set in this mask will be cleared
+ * @param SetMask Bits to be set after the clear
+ * (A usage sketch follows the function below.)
+ */
+void modify_pico_register(unsigned int Addr, unsigned int ClearMask,
+ unsigned int SetMask)
+{
+ u32 val;
+ val = __raw_readl(Addr);
+ val &= ~(ClearMask);
+ val |= (SetMask);
+ __raw_writel(val, Addr);
+}
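
A usage sketch mirroring the divisor programming done in picoDLP_panel_enable() below; pico_set_divisors and dispc_virt_base are illustrative names, not symbols from this driver.

/*
 * Illustrative only: clear both divisor fields of DSI_DIV2, then set
 * the LCD divisor to 1 and the PCD divisor to 4, as the enable path does.
 */
static void pico_set_divisors(unsigned int dispc_virt_base)
{
	modify_pico_register(dispc_virt_base + DSI_DIV2,
			     0xFF | (0xFF << DSI_DIV_LCD),
			     (1 << DSI_DIV_LCD) | (4 << DSI_DIV_PCD));
}
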
+
+/*
+ * Configure datapath for test pattern generator operation
+ *
+ * @param pattern_select - I - color table to load
+ *
+ * @return 0 - no errors
+ * 1 - invalid pattern specified
+ */
+int dpp2600_config_tpg(int pattern_select)
+{
+ if (pattern_select > TPG_ANSI_CHECKERBOARD)
+ return 1;
+ pico_i2c_write(SEQ_CONTROL, 0);
+ pico_i2c_write(INPUT_RESOLUTION, WVGA_854_LANDSCAPE);
+ pico_i2c_write(SEQUENCE_MODE, SEQ_LOCK);
+ pico_i2c_write(TEST_PAT_SELECT, pattern_select);
+ pico_i2c_write(INPUT_SOURCE, 1);
+ pico_i2c_write(SEQ_CONTROL, 1);
+ return 0;
+}
+
+static int pico_i2c_initialize(void)
+{
+
+ mutex_init(&sd->xfer_lock);
+ mdelay(100);
+ /* pico Soft reset */
+ pico_i2c_write(SOFT_RESET, 1);
+ /* Front-end reset */
+ pico_i2c_write(DMD_PARK_TRIGGER, 1);
+ /* write the software version number to a spare register field */
+ pico_i2c_write(MISC_REG, PICO_MAJOR<<2 | PICO_MINOR);
+ pico_i2c_write(SEQ_CONTROL, 0);
+ pico_i2c_write(SEQ_VECTOR, 0x100);
+ pico_i2c_write(DMD_BLOCK_COUNT, 7);
+ pico_i2c_write(DMD_VCC_CONTROL, 0x109);
+ pico_i2c_write(DMD_PARK_PULSE_COUNT, 0xA);
+ pico_i2c_write(DMD_PARK_PULSE_WIDTH, 0xB);
+ pico_i2c_write(DMD_PARK_DELAY, 0x2ED);
+ pico_i2c_write(DMD_SHADOW_ENABLE, 0);
+ /* serial flash common config */
+ pico_i2c_write(FLASH_OPCODE, 0xB);
+ pico_i2c_write(FLASH_DUMMY_BYTES, 1);
+ pico_i2c_write(FLASH_ADDR_BYTES, 3);
+ /* configure DMA from flash to LUT */
+ dpp2600_flash_dma(CMT_LUT_0_START_ADDR, CMT_LUT_0_SIZE, 1, CMT_LUT_ALL);
+ /* SEQ and DRC look-up tables */
+ dpp2600_flash_dma(SEQUENCE_0_START_ADDR, SEQUENCE_0_SIZE, 0, SEQ_SEQ_LUT);
+ dpp2600_flash_dma(DRC_TABLE_0_START_ADDR, DRC_TABLE_0_SIZE, 0, SEQ_DRC_LUT_ALL);
+ /* frame buffer memory controller enable */
+ pico_i2c_write(SDC_ENABLE, 1);
+ /* AGC control */
+ pico_i2c_write(AGC_CTRL, 7);
+ /* CCA */
+ pico_i2c_write(CCA_C1A, 0x100);
+ pico_i2c_write(CCA_C1B, 0x000);
+ pico_i2c_write(CCA_C1C, 0x000);
+ pico_i2c_write(CCA_C2A, 0x000);
+ pico_i2c_write(CCA_C2B, 0x100);
+ pico_i2c_write(CCA_C2C, 0x000);
+ pico_i2c_write(CCA_C3A, 0x000);
+ pico_i2c_write(CCA_C3B, 0x000);
+ pico_i2c_write(CCA_C3C, 0x100);
+ pico_i2c_write(CCA_C7A, 0x100);
+ pico_i2c_write(CCA_C7B, 0x100);
+ pico_i2c_write(CCA_C7C, 0x100);
+ pico_i2c_write(CCA_ENABLE, 1);
+ /* main datapath setup */
+ pico_i2c_write(CPU_IF_MODE, 1);
+ pico_i2c_write(SHORT_FLIP, 1);
+ pico_i2c_write(CURTAIN_CONTROL, 0);
+ /* display Logo splash image */
+ dpp2600_config_splash(1);
+ pico_i2c_write(DMD_PARK_TRIGGER, 0);
+ /* LED PWM and enables */
+ pico_i2c_write(R_DRIVE_CURRENT, 0x298);
+ pico_i2c_write(G_DRIVE_CURRENT, 0x298);
+ pico_i2c_write(B_DRIVE_CURRENT, 0x298);
+ pico_i2c_write(RGB_DRIVER_ENABLE, 7);
+ mdelay(10000);
+ dpp2600_config_rgb();
+ return 0;
+
+
+}
+
+static int pico_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ printk("pico probe called");
+ sd = kzalloc(sizeof(struct pico), GFP_KERNEL);
+ if (sd == NULL)
+ return -ENOMEM;
+ i2c_set_clientdata(client, sd);
+ sd->client = client;
+ return 0;
+}
+
+static int __exit pico_remove(struct i2c_client *client)
+{
+ struct pico *sd1 = i2c_get_clientdata(client);
+ kfree(sd1);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static const struct i2c_device_id pico_id[] = {
+ { "picoDLP_i2c_driver", 0 },
+ { },
+};
+
+
+static int picoDLP_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+ printk("pico DLP init is called ");
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ return r;
+ }
+
+ display_control_reg = dispc_base;
+ /* Specify the display controller logic clock divisor */
+ modify_pico_register(display_control_reg + DSI_DIV2, 0xFF |
+ (0XFF << DSI_DIV_LCD), (1 << DSI_DIV_LCD) | (4 << DSI_DIV_PCD));
+ /* LCD output Enabled */
+ modify_pico_register(display_control_reg + DSI_CONTROL2, (1<<11), 0x00000000);
+ pico_i2c_initialize();
+ return 0;
+
+}
+static void pico_get_resolution(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres)
+{
+ *xres = dssdev->panel.timings.x_res;
+ *yres = dssdev->panel.timings.y_res;
+}
+
+static int picoDLP_panel_probe(struct omap_dss_device *dssdev)
+{
+ dssdev->panel.config &= ~((OMAP_DSS_LCD_IPC) | (OMAP_DSS_LCD_IEO));
+ dssdev->panel.config = (OMAP_DSS_LCD_TFT) | (OMAP_DSS_LCD_ONOFF) |
+ (OMAP_DSS_LCD_IHS) |
+ (OMAP_DSS_LCD_IVS) ;
+ dssdev->panel.acb = 0x0;
+ dssdev->panel.timings = pico_ls_timings;
+ dssdev->get_resolution = pico_get_resolution;
+
+ return 0;
+}
+
+static int picoDLP_panel_remove(struct omap_dss_device *dssdev)
+{
+ return 0;
+}
+
+static int picoDLP_panel_disable(struct omap_dss_device *dssdev)
+{
+ /* Turn off DLP power */
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+ return 0;
+}
+
+static struct omap_dss_driver picoDLP_driver = {
+ .probe = picoDLP_panel_probe,
+ .remove = picoDLP_panel_remove,
+ .enable = picoDLP_panel_enable,
+ .disable = picoDLP_panel_disable,
+ .driver = {
+ .name = "picoDLP_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct i2c_driver pico_i2c_driver = {
+ .driver = {
+ .name = "pico_i2c_driver",
+ },
+ .probe = pico_probe,
+ .remove = __exit_p(pico_remove),
+ .id_table = pico_id,
+
+};
+
+static int __init pico_i2c_init(void)
+{
+ int r;
+ r = i2c_add_driver(&pico_i2c_driver);
+ if (r < 0) {
+ printk(KERN_WARNING DRIVER_NAME
+ " driver registration failed\n");
+ return r;
+ }
+ omap_dss_register_driver(&picoDLP_driver);
+ return 0;
+}
+
+
+static void __exit pico_i2c_exit(void)
+{
+ i2c_del_driver(&pico_i2c_driver);
+ omap_dss_unregister_driver(&picoDLP_driver);
+}
+
+
+module_init(pico_i2c_init);
+module_exit(pico_i2c_exit);
+
+
+
+MODULE_DESCRIPTION("pico DLP driver");
+MODULE_LICENSE("GPL");
+
+
+
diff --git a/drivers/video/omap2/displays/panel-picodlp.h b/drivers/video/omap2/displays/panel-picodlp.h
new file mode 100644
index 000000000000..01c1b9528467
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-picodlp.h
@@ -0,0 +1,281 @@
+#define MAIN_STATUS 0x03
+#define PBC_CONTROL 0x08
+#define INPUT_SOURCE 0x0B
+#define INPUT_RESOLUTION 0x0C
+#define DATA_FORMAT 0x0D
+#define IMG_ROTATION 0x0E
+#define LONG_FLIP 0x0F
+#define SHORT_FLIP 0x10
+#define TEST_PAT_SELECT 0x11
+#define R_DRIVE_CURRENT 0x12
+#define G_DRIVE_CURRENT 0x13
+#define B_DRIVE_CURRENT 0x14
+#define READ_REG_SELECT 0x15
+#define RGB_DRIVER_ENABLE 0x16
+
+#define CPU_IF_MODE 0x18
+#define FRAME_RATE 0x19
+#define CPU_IF_SYNC_METHOD 0x1A
+#define CPU_IF_SOF 0x1B
+#define CPU_IF_EOF 0x1C
+#define CPU_IF_SLEEP 0x1D
+
+#define SEQUENCE_MODE 0x1E
+#define SOFT_RESET 0x1F
+#define FRONT_END_RESET 0x21
+#define AUTO_PWR_ENABLE 0x22
+
+#define VSYNC_LINE_DELAY 0x23
+#define CPU_PI_HORIZ_START 0x24
+#define CPU_PI_VERT_START 0x25
+#define CPU_PI_HORIZ_WIDTH 0x26
+#define CPU_PI_VERT_HEIGHT 0x27
+
+#define PIXEL_MASK_CROP 0x28
+#define CROP_FIRST_LINE 0x29
+#define CROP_LAST_LINE 0x2A
+#define CROP_FIRST_PIXEL 0x2B
+#define CROP_LAST_PIXEL 0x2C
+#define DMD_PARK_TRIGGER 0x2D
+
+#define MISC_REG 0x30
+
+/* AGC registers */
+#define AGC_CTRL 0x50
+#define AGC_CLIPPED_PIXS 0x55
+#define AGC_BRIGHT_PIXS 0x56
+#define AGC_BG_PIXS 0x57
+#define AGC_SAFETY_MARGIN 0x17
+
+/* CCA registers */
+#define CCA_ENABLE 0x5E
+#define CCA_C1A 0x5F
+#define CCA_C1B 0x60
+#define CCA_C1C 0x61
+#define CCA_C2A 0x62
+#define CCA_C2B 0x63
+#define CCA_C2C 0x64
+#define CCA_C3A 0x65
+#define CCA_C3B 0x66
+#define CCA_C3C 0x67
+#define CCA_C7A 0x71
+#define CCA_C7B 0x72
+#define CCA_C7C 0x73
+
+/* registers for DMA operations from flash to DPP2600 LUTs */
+#define FLASH_ADDR_BYTES 0x74
+#define FLASH_DUMMY_BYTES 0x75
+#define FLASH_WRITE_BYTES 0x76
+#define FLASH_READ_BYTES 0x77
+#define FLASH_OPCODE 0x78
+#define FLASH_START_ADDR 0x79
+#define FLASH_DUMMY2 0x7A
+#define FLASH_WRITE_DATA 0x7B
+
+#define TEMPORAL_DITH_DISABLE 0x7E
+#define SEQ_CONTROL 0x82
+#define SEQ_VECTOR 0x83
+#define DMD_BLOCK_COUNT 0x84
+#define DMD_VCC_CONTROL 0x86
+#define DMD_PARK_PULSE_COUNT 0x87
+#define DMD_PARK_PULSE_WIDTH 0x88
+#define DMD_PARK_DELAY 0x89
+#define DMD_SHADOW_ENABLE 0x8E
+#define SEQ_STATUS 0x8F
+#define FLASH_CLOCK_CONTROL 0x98
+#define DMD_PARK 0x2D
+
+#define SDRAM_BIST_ENABLE 0x46
+#define DDR_DRIVER_STRENGTH 0x9A
+#define SDC_ENABLE 0x9D
+#define SDC_BUFF_SWAP_DISABLE 0xA3
+#define CURTAIN_CONTROL 0xA6
+#define DDR_BUS_SWAP_ENABLE 0xA7
+#define DMD_TRC_ENABLE 0xA8
+#define DMD_BUS_SWAP_ENABLE 0xA9
+
+#define ACTGEN_ENABLE 0xAE
+#define ACTGEN_CONTROL 0xAF
+#define ACTGEN_HORIZ_BP 0xB0
+#define ACTGEN_VERT_BP 0xB1
+
+/* LUT access */
+#define CMT_SPLASH_LUT_START_ADDR 0xFA
+#define CMT_SPLASH_LUT_DEST_SELECT 0xFB
+#define CMT_SPLASH_LUT_DATA 0xFC
+#define SEQ_RESET_LUT_START_ADDR 0xFD
+#define SEQ_RESET_LUT_DEST_SELECT 0xFE
+#define SEQ_RESET_LUT_DATA 0xFF
+
+/* input source defines */
+#define PARALLEL_RGB 0
+#define INT_TEST_PATTERN 1
+#define SPLASH_SCREEN 2
+#define CPU_INTF 3
+#define BT656 4
+
+/* input resolution defines */
+#define QVGA_PORTRAIT 0 /* (240h*320v) */
+#define QVGA_LANDSCAPE 1 /* (320h*240v) */
+#define QWVGA_LANDSCAPE 3 /* (427h*240v) */
+#define VGA_PORTRAIT_2_3 4 /* (430h*640v) */
+#define VGA_LANDSCAPE_3_2 5 /* (640h*430v) */
+#define VGA_PORTRAIT 6 /* (480h*640v) */
+#define VGA_LANDSCAPE 7 /* (640h*480v) */
+#define WVGA_720_PORTRAIT 8 /* (480h*720v) */
+#define WVGA_720_LANDSCAPE 9 /* (720h*480v) */
+#define WVGA_752_PORTRAIT 10 /* (480h*752v) */
+#define WVGA_752_LANDSCAPE 11 /* (752h*480v) */
+#define WVGA_800_PORTRAIT 12 /* (480h*800v) */
+#define WVGA_800_LANDSCAPE 13 /* (800h*480v) */
+#define WVGA_852_PORTRAIT 14 /* (480h*852v) */
+#define WVGA_852_LANDSCAPE 15 /* (852h*480v) */
+#define WVGA_853_PORTRAIT 16 /* (480h*853v) */
+#define WVGA_853_LANDSCAPE 17 /* (853h*480v) */
+#define WVGA_854_PORTRAIT 18 /* (480h*854v) */
+#define WVGA_854_LANDSCAPE 19 /* (854h*480v) */
+#define WVGA_864_PORTRAIT 20 /* (480h*864v) */
+#define WVGA_864_LANDSCAPE 21 /* (864h*480v) */
+#define NTSC_LANDSCAPE 23 /* (720h*240v) */
+#define PAL_LANDSCAPE 25 /* (720h*288v) */
+#define VGA_DMD_OPTICAL_TEST 33 /* (456h*684v) */
+#define WVGA_DMD_OPTICAL_TEST 35 /* (608h*684v) */
+
+/* data format defines */
+#define RGB565 0
+#define RGB666 1
+#define RGB888 2
+
+/* test pattern defines */
+#define TPG_CHECKERBOARD 0
+#define TPG_BLACK 1
+#define TPG_WHITE 2
+#define TPG_RED 3
+#define TPG_BLUE 4
+#define TPG_GREEN 5
+#define TPG_VLINES_BLACK 6
+#define TPG_HLINES_BLACK 7
+#define TPG_VLINES_ALT 8
+#define TPG_HLINES_ALT 9
+#define TPG_DIAG_LINES 10
+#define TPG_GREYRAMP_VERT 11
+#define TPG_GREYRAMP_HORIZ 12
+#define TPG_ANSI_CHECKERBOARD 13
+
+/* sequence mode defines */
+#define SEQ_FREE_RUN 0
+#define SEQ_LOCK 1
+
+/* curtain color defines */
+#define CURTAIN_BLACK 0
+#define CURTAIN_RED 1
+#define CURTAIN_GREEN 2
+#define CURTAIN_BLUE 3
+#define CURTAIN_YELLOW 4
+#define CURTAIN_MAGENTA 5
+#define CURTAIN_CYAN 6
+#define CURTAIN_WHITE 7
+
+/* LUT defines */
+#define CMT_LUT_NONE 0
+#define CMT_LUT_GREEN 1
+#define CMT_LUT_RED 2
+#define CMT_LUT_BLUE 3
+#define CMT_LUT_ALL 4
+#define SPLASH_LUT 5
+
+#define SEQ_LUT_NONE 0
+#define SEQ_DRC_LUT_0 1
+#define SEQ_DRC_LUT_1 2
+#define SEQ_DRC_LUT_2 3
+#define SEQ_DRC_LUT_3 4
+#define SEQ_SEQ_LUT 5
+#define SEQ_DRC_LUT_ALL 6
+#define WPC_PROGRAM_LUT 7
+
+/*#define DMA_STATUS BIT8 */
+
+
+#define BITSTREAM_START_ADDR 0x00000000
+#define BITSTREAM_SIZE 0x00040000
+
+#define WPC_FW_0_START_ADDR 0x00040000
+#define WPC_FW_0_SIZE 0x00000ce8
+
+#define SEQUENCE_0_START_ADDR 0x00044000
+#define SEQUENCE_0_SIZE 0x00001000
+
+#define SEQUENCE_1_START_ADDR 0x00045000
+#define SEQUENCE_1_SIZE 0x00000d10
+
+#define SEQUENCE_2_START_ADDR 0x00046000
+#define SEQUENCE_2_SIZE 0x00000d10
+
+#define SEQUENCE_3_START_ADDR 0x00047000
+#define SEQUENCE_3_SIZE 0x00000d10
+
+#define SEQUENCE_4_START_ADDR 0x00048000
+#define SEQUENCE_4_SIZE 0x00000d10
+
+#define SEQUENCE_5_START_ADDR 0x00049000
+#define SEQUENCE_5_SIZE 0x00000d10
+
+#define SEQUENCE_6_START_ADDR 0x0004a000
+#define SEQUENCE_6_SIZE 0x00000d10
+
+#define CMT_LUT_0_START_ADDR 0x0004b200
+#define CMT_LUT_0_SIZE 0x00000600
+
+#define CMT_LUT_1_START_ADDR 0x0004b800
+#define CMT_LUT_1_SIZE 0x00000600
+
+#define CMT_LUT_2_START_ADDR 0x0004be00
+#define CMT_LUT_2_SIZE 0x00000600
+
+#define CMT_LUT_3_START_ADDR 0x0004c400
+#define CMT_LUT_3_SIZE 0x00000600
+
+#define CMT_LUT_4_START_ADDR 0x0004ca00
+#define CMT_LUT_4_SIZE 0x00000600
+
+#define CMT_LUT_5_START_ADDR 0x0004d000
+#define CMT_LUT_5_SIZE 0x00000600
+
+#define CMT_LUT_6_START_ADDR 0x0004d600
+#define CMT_LUT_6_SIZE 0x00000600
+
+#define DRC_TABLE_0_START_ADDR 0x0004dc00
+#define DRC_TABLE_0_SIZE 0x00000100
+
+#define SPLASH_0_START_ADDR 0x0004dd00
+#define SPLASH_0_SIZE 0x00032280
+
+#define SEQUENCE_7_START_ADDR 0x00080000
+#define SEQUENCE_7_SIZE 0x00000d10
+
+#define SEQUENCE_8_START_ADDR 0x00081800
+#define SEQUENCE_8_SIZE 0x00000d10
+
+#define SEQUENCE_9_START_ADDR 0x00083000
+#define SEQUENCE_9_SIZE 0x00000d10
+
+#define CMT_LUT_7_START_ADDR 0x0008e000
+#define CMT_LUT_7_SIZE 0x00000600
+
+#define CMT_LUT_8_START_ADDR 0x0008e800
+#define CMT_LUT_8_SIZE 0x00000600
+
+#define CMT_LUT_9_START_ADDR 0x0008f000
+#define CMT_LUT_9_SIZE 0x00000600
+
+#define SPLASH_1_START_ADDR 0x0009a000
+#define SPLASH_1_SIZE 0x00032280
+
+#define SPLASH_2_START_ADDR 0x000cd000
+#define SPLASH_2_SIZE 0x00032280
+
+#define SPLASH_3_START_ADDR 0x00100000
+#define SPLASH_3_SIZE 0x00032280
+
+#define OPT_SPLASH_0_START_ADDR 0x00134000
+#define OPT_SPLASH_0_SIZE 0x000cb100
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 1f01dfc5e52e..cd9f353b642b 100644..100755
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -36,6 +36,7 @@
/* DSI Virtual channel. Hardcoded for now. */
#define TCH 0
+#define DCS_RESET 0x01
#define DCS_READ_NUM_ERRORS 0x05
#define DCS_READ_POWER_MODE 0x0a
#define DCS_READ_MADCTL 0x0b
@@ -110,12 +111,12 @@ static void hw_guard_wait(struct taal_data *td)
}
}
-static int taal_dcs_read_1(u8 dcs_cmd, u8 *data)
+static int taal_dcs_read_1(enum dsi lcd_ix, u8 dcs_cmd, u8 *data)
{
int r;
u8 buf[1];
- r = dsi_vc_dcs_read(TCH, dcs_cmd, buf, 1);
+ r = dsi_vc_dcs_read(lcd_ix, TCH, dcs_cmd, buf, 1);
if (r < 0)
return r;
@@ -125,21 +126,20 @@ static int taal_dcs_read_1(u8 dcs_cmd, u8 *data)
return 0;
}
-static int taal_dcs_write_0(u8 dcs_cmd)
+static int taal_dcs_write_0(enum dsi lcd_ix, u8 dcs_cmd)
{
- return dsi_vc_dcs_write(TCH, &dcs_cmd, 1);
+ return dsi_vc_dcs_write(lcd_ix, TCH, &dcs_cmd, 1);
}
-static int taal_dcs_write_1(u8 dcs_cmd, u8 param)
+static int taal_dcs_write_1(enum dsi lcd_ix, u8 dcs_cmd, u8 param)
{
u8 buf[2];
buf[0] = dcs_cmd;
buf[1] = param;
- return dsi_vc_dcs_write(TCH, buf, 2);
+ return dsi_vc_dcs_write(lcd_ix, TCH, buf, 2);
}
-static int taal_sleep_in(struct taal_data *td)
-
+static int taal_sleep_in(enum dsi lcd_ix, struct taal_data *td)
{
u8 cmd;
int r;
@@ -147,7 +147,7 @@ static int taal_sleep_in(struct taal_data *td)
hw_guard_wait(td);
cmd = DCS_SLEEP_IN;
- r = dsi_vc_dcs_write_nosync(TCH, &cmd, 1);
+ r = dsi_vc_dcs_write_nosync(lcd_ix, TCH, &cmd, 1);
if (r)
return r;
@@ -158,13 +158,27 @@ static int taal_sleep_in(struct taal_data *td)
return 0;
}
-static int taal_sleep_out(struct taal_data *td)
+static int taal_reset(enum dsi lcd_ix, struct taal_data *td)
+{
+ int r;
+
+ r = taal_dcs_write_0(lcd_ix, DCS_RESET);
+ if (r)
+ return r;
+
+ msleep(5);
+
+ return 0;
+
+}
+
+static int taal_sleep_out(enum dsi lcd_ix, struct taal_data *td)
{
int r;
hw_guard_wait(td);
- r = taal_dcs_write_0(DCS_SLEEP_OUT);
+ r = taal_dcs_write_0(lcd_ix, DCS_SLEEP_OUT);
if (r)
return r;
@@ -175,30 +189,30 @@ static int taal_sleep_out(struct taal_data *td)
return 0;
}
-static int taal_get_id(u8 *id1, u8 *id2, u8 *id3)
+static int taal_get_id(enum dsi lcd_ix, u8 *id1, u8 *id2, u8 *id3)
{
int r;
- r = taal_dcs_read_1(DCS_GET_ID1, id1);
+ r = taal_dcs_read_1(lcd_ix, DCS_GET_ID1, id1);
if (r)
return r;
- r = taal_dcs_read_1(DCS_GET_ID2, id2);
+ r = taal_dcs_read_1(lcd_ix, DCS_GET_ID2, id2);
if (r)
return r;
- r = taal_dcs_read_1(DCS_GET_ID3, id3);
+ r = taal_dcs_read_1(lcd_ix, DCS_GET_ID3, id3);
if (r)
return r;
return 0;
}
-static int taal_set_addr_mode(u8 rotate, bool mirror)
+static int taal_set_addr_mode(enum dsi lcd_ix, u8 rotate, bool mirror)
{
int r;
u8 mode;
int b5, b6, b7;
- r = taal_dcs_read_1(DCS_READ_MADCTL, &mode);
+ r = taal_dcs_read_1(lcd_ix, DCS_READ_MADCTL, &mode);
if (r)
return r;
@@ -232,12 +246,12 @@ static int taal_set_addr_mode(u8 rotate, bool mirror)
mode &= ~((1<<7) | (1<<6) | (1<<5));
mode |= (b7 << 7) | (b6 << 6) | (b5 << 5);
- return taal_dcs_write_1(DCS_MEM_ACC_CTRL, mode);
+ return taal_dcs_write_1(lcd_ix, DCS_MEM_ACC_CTRL, mode);
}
-static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
+static int taal_set_update_window(enum dsi lcd_ix, u16 x, u16 y, u16 w, u16 h)
{
- int r;
+ int r = 0;
u16 x1 = x;
u16 x2 = x + w - 1;
u16 y1 = y;
@@ -250,7 +264,7 @@ static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
buf[3] = (x2 >> 8) & 0xff;
buf[4] = (x2 >> 0) & 0xff;
- r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
+ r = dsi_vc_dcs_write_nosync(lcd_ix, TCH, buf, sizeof(buf));
if (r)
return r;
@@ -260,11 +274,11 @@ static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
buf[3] = (y2 >> 8) & 0xff;
buf[4] = (y2 >> 0) & 0xff;
- r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
+ r = dsi_vc_dcs_write_nosync(lcd_ix, TCH, buf, sizeof(buf));
if (r)
return r;
- dsi_vc_send_bta_sync(TCH);
+ dsi_vc_send_bta_sync(lcd_ix, TCH);
return r;
}
@@ -275,6 +289,8 @@ static int taal_bl_update_status(struct backlight_device *dev)
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
int level;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
dev->props.power == FB_BLANK_UNBLANK)
@@ -286,9 +302,9 @@ static int taal_bl_update_status(struct backlight_device *dev)
if (td->use_dsi_bl) {
if (td->enabled) {
- dsi_bus_lock();
- r = taal_dcs_write_1(DCS_BRIGHTNESS, level);
- dsi_bus_unlock();
+ dsi_bus_lock(lcd_ix);
+ r = taal_dcs_write_1(lcd_ix, DCS_BRIGHTNESS, level);
+ dsi_bus_unlock(lcd_ix);
if (r)
return r;
}
@@ -342,7 +358,15 @@ static irqreturn_t taal_te_isr(int irq, void *data)
{
struct omap_dss_device *dssdev = data;
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ complete_all(&td->te_completion);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t taal_te_isr2(int irq, void *data)
+{
+ struct omap_dss_device *dssdev = data;
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
complete_all(&td->te_completion);
return IRQ_HANDLED;
@@ -355,11 +379,13 @@ static ssize_t taal_num_errors_show(struct device *dev,
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
u8 errors;
int r;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
if (td->enabled) {
- dsi_bus_lock();
- r = taal_dcs_read_1(DCS_READ_NUM_ERRORS, &errors);
- dsi_bus_unlock();
+ dsi_bus_lock(lcd_ix);
+ r = taal_dcs_read_1(lcd_ix, DCS_READ_NUM_ERRORS, &errors);
+ dsi_bus_unlock(lcd_ix);
} else {
r = -ENODEV;
}
@@ -377,11 +403,13 @@ static ssize_t taal_hw_revision_show(struct device *dev,
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
u8 id1, id2, id3;
int r;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
if (td->enabled) {
- dsi_bus_lock();
- r = taal_get_id(&id1, &id2, &id3);
- dsi_bus_unlock();
+ dsi_bus_lock(lcd_ix);
+ r = taal_get_id(lcd_ix, &id1, &id2, &id3);
+ dsi_bus_unlock(lcd_ix);
} else {
r = -ENODEV;
}
@@ -426,6 +454,8 @@ static ssize_t store_cabc_mode(struct device *dev,
struct omap_dss_device *dssdev = to_dss_device(dev);
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int i;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
if (sysfs_streq(cabc_modes[i], buf))
@@ -436,10 +466,10 @@ static ssize_t store_cabc_mode(struct device *dev,
return -EINVAL;
if (td->enabled) {
- dsi_bus_lock();
+ dsi_bus_lock(lcd_ix);
if (!td->cabc_broken)
- taal_dcs_write_1(DCS_WRITE_CABC, i);
- dsi_bus_unlock();
+ taal_dcs_write_1(lcd_ix, DCS_WRITE_CABC, i);
+ dsi_bus_unlock(lcd_ix);
}
td->cabc_mode = i;
@@ -487,6 +517,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
struct taal_data *td;
struct backlight_device *bldev;
int r;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
const struct omap_video_timings taal_panel_timings = {
.x_res = 864,
@@ -495,8 +527,11 @@ static int taal_probe(struct omap_dss_device *dssdev)
dev_dbg(&dssdev->dev, "probe\n");
- dssdev->panel.config = OMAP_DSS_LCD_TFT;
+ dssdev->panel.config = OMAP_DSS_LCD_TFT |
+ OMAP_DSS_LCD_ONOFF | OMAP_DSS_LCD_RF;
dssdev->panel.timings = taal_panel_timings;
+ dssdev->panel.acbi = 0;
+ dssdev->panel.acb = 0;
dssdev->ctrl.pixel_size = 24;
td = kzalloc(sizeof(*td), GFP_KERNEL);
@@ -524,7 +559,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
if (!dssdev->set_backlight)
td->use_dsi_bl = true;
- bldev = backlight_device_register("taal", &dssdev->dev, dssdev,
+ bldev = backlight_device_register(dssdev->name, &dssdev->dev, dssdev,
&taal_bl_ops);
if (IS_ERR(bldev)) {
r = PTR_ERR(bldev);
@@ -547,6 +582,22 @@ static int taal_probe(struct omap_dss_device *dssdev)
if (dssdev->phy.dsi.ext_te) {
int gpio = dssdev->phy.dsi.ext_te_gpio;
+ void __iomem *phymux_base = NULL;
+ int val;
+
+ phymux_base = ioremap(0x4A100000, 0x1000);
+
+ if (lcd_ix == dsi1) {
+ val = __raw_readl(phymux_base + 0x90);
+ val = val & 0xFFFFFFE0;
+ val = val | 0x11B;
+ __raw_writel(val, phymux_base + 0x90);
+ } else {
+ val = __raw_readl(phymux_base + 0x94);
+ val = val & 0xFFFFFFE0;
+ val = val | 0x11B;
+ __raw_writel(val, phymux_base + 0x94);
+ }
r = gpio_request(gpio, "taal irq");
if (r) {
@@ -556,9 +607,15 @@ static int taal_probe(struct omap_dss_device *dssdev)
gpio_direction_input(gpio);
- r = request_irq(gpio_to_irq(gpio), taal_te_isr,
+ if (lcd_ix == dsi1) {
+ r = request_irq(gpio_to_irq(gpio), taal_te_isr,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ "taal vsync", dssdev);
+ } else {
+ r = request_irq(gpio_to_irq(gpio), taal_te_isr2,
IRQF_DISABLED | IRQF_TRIGGER_RISING,
- "taal vsync", dssdev);
+ "taal vsync2", dssdev);
+ }
if (r) {
dev_err(&dssdev->dev, "IRQ request failed\n");
@@ -624,8 +681,10 @@ static void taal_remove(struct omap_dss_device *dssdev)
static int taal_enable(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- u8 id1, id2, id3;
+ u8 id1 = 0, id2 = 0, id3 = 0;
int r;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
dev_dbg(&dssdev->dev, "enable\n");
@@ -634,15 +693,17 @@ static int taal_enable(struct omap_dss_device *dssdev)
if (r)
return r;
}
+ mdelay(100);
+ r = taal_reset(lcd_ix, td);
/* it seems we have to wait a bit until taal is ready */
msleep(5);
- r = taal_sleep_out(td);
+ r = taal_sleep_out(lcd_ix, td);
if (r)
goto err;
- r = taal_get_id(&id1, &id2, &id3);
+ r = taal_get_id(lcd_ix, &id1, &id2, &id3);
if (r)
goto err;
@@ -650,16 +711,17 @@ static int taal_enable(struct omap_dss_device *dssdev)
if (id2 == 0x00 || id2 == 0xff || id2 == 0x81)
td->cabc_broken = true;
- taal_dcs_write_1(DCS_BRIGHTNESS, 0xff);
- taal_dcs_write_1(DCS_CTRL_DISPLAY, (1<<2) | (1<<5)); /* BL | BCTRL */
+ taal_dcs_write_1(lcd_ix, DCS_BRIGHTNESS, 0xff);
+ taal_dcs_write_1(lcd_ix, DCS_CTRL_DISPLAY, (1<<2) | (1<<5)); /* BL | BCTRL */
+
+ taal_dcs_write_1(lcd_ix, DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
- taal_dcs_write_1(DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
- taal_set_addr_mode(td->rotate, td->mirror);
+ taal_set_addr_mode(lcd_ix, td->rotate, td->mirror);
if (!td->cabc_broken)
- taal_dcs_write_1(DCS_WRITE_CABC, td->cabc_mode);
+ taal_dcs_write_1(lcd_ix, DCS_WRITE_CABC, td->cabc_mode);
- taal_dcs_write_0(DCS_DISPLAY_ON);
+ taal_dcs_write_0(lcd_ix, DCS_DISPLAY_ON);
#ifdef TAAL_USE_ESD_CHECK
queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
@@ -687,13 +749,15 @@ err:
static void taal_disable(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
dev_dbg(&dssdev->dev, "disable\n");
cancel_delayed_work(&td->esd_work);
- taal_dcs_write_0(DCS_DISPLAY_OFF);
- taal_sleep_in(td);
+ taal_dcs_write_0(lcd_ix, DCS_DISPLAY_OFF);
+ taal_sleep_in(lcd_ix, td);
/* wait a bit so that the message goes through */
msleep(10);
@@ -729,20 +793,25 @@ static int taal_resume(struct omap_dss_device *dssdev)
static void taal_setup_update(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
- taal_set_update_window(x, y, w, h);
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+
+ taal_set_update_window(lcd_ix, x, y, w, h);
}
static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
td->te_enabled = enable;
if (enable)
- r = taal_dcs_write_1(DCS_TEAR_ON, 0);
+ r = taal_dcs_write_1(lcd_ix, DCS_TEAR_ON, 0);
else
- r = taal_dcs_write_0(DCS_TEAR_OFF);
+ r = taal_dcs_write_0(lcd_ix, DCS_TEAR_OFF);
return r;
}
@@ -762,6 +831,11 @@ static int taal_wait_te(struct omap_dss_device *dssdev)
return -ETIME;
}
+ /* Tearing interrupt comes slightly early, we wait
+ * for 725us after the rising edge
+ */
+ if (cpu_is_omap44xx())
+ udelay(725);
return 0;
}
@@ -769,11 +843,13 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
if (td->enabled) {
- r = taal_set_addr_mode(rotate, td->mirror);
+ r = taal_set_addr_mode(lcd_ix, rotate, td->mirror);
if (r)
return r;
@@ -794,11 +870,13 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
dev_dbg(&dssdev->dev, "mirror %d\n", enable);
if (td->enabled) {
- r = taal_set_addr_mode(td->rotate, enable);
+ r = taal_set_addr_mode(lcd_ix, td->rotate, enable);
if (r)
return r;
@@ -819,14 +897,16 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
{
u8 id1, id2, id3;
int r;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
- r = taal_dcs_read_1(DCS_GET_ID1, &id1);
+ r = taal_dcs_read_1(lcd_ix, DCS_GET_ID1, &id1);
if (r)
return r;
- r = taal_dcs_read_1(DCS_GET_ID2, &id2);
+ r = taal_dcs_read_1(lcd_ix, DCS_GET_ID2, &id2);
if (r)
return r;
- r = taal_dcs_read_1(DCS_GET_ID3, &id3);
+ r = taal_dcs_read_1(lcd_ix, DCS_GET_ID3, &id3);
if (r)
return r;
@@ -841,6 +921,8 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
int first = 1;
int plen;
unsigned buf_used = 0;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
if (size < w * h * 3)
return -ENOMEM;
@@ -859,7 +941,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
taal_setup_update(dssdev, x, y, w, h);
- r = dsi_vc_set_max_rx_packet_size(TCH, plen);
+ r = dsi_vc_set_max_rx_packet_size(lcd_ix, TCH, plen);
if (r)
return r;
@@ -867,11 +949,11 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
u8 dcs_cmd = first ? 0x2e : 0x3e;
first = 0;
- r = dsi_vc_dcs_read(TCH, dcs_cmd,
+ r = dsi_vc_dcs_read(lcd_ix, TCH, dcs_cmd,
buf + buf_used, size - buf_used);
if (r < 0) {
- dev_err(&dssdev->dev, "read error\n");
+ dev_err(&dssdev->dev, "taal read error\n");
goto err;
}
@@ -893,7 +975,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
r = buf_used;
err:
- dsi_vc_set_max_rx_packet_size(TCH, 1);
+ dsi_vc_set_max_rx_packet_size(lcd_ix, TCH, 1);
return r;
}
@@ -905,26 +987,28 @@ static void taal_esd_work(struct work_struct *work)
struct omap_dss_device *dssdev = td->dssdev;
u8 state1, state2;
int r;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
if (!td->enabled)
return;
- dsi_bus_lock();
+ dsi_bus_lock(lcd_ix);
- r = taal_dcs_read_1(DCS_RDDSDR, &state1);
+ r = taal_dcs_read_1(lcd_ix, DCS_RDDSDR, &state1);
if (r) {
dev_err(&dssdev->dev, "failed to read Taal status\n");
goto err;
}
/* Run self diagnostics */
- r = taal_sleep_out(td);
+ r = taal_sleep_out(lcd_ix, td);
if (r) {
dev_err(&dssdev->dev, "failed to run Taal self-diagnostics\n");
goto err;
}
- r = taal_dcs_read_1(DCS_RDDSDR, &state2);
+ r = taal_dcs_read_1(lcd_ix, DCS_RDDSDR, &state2);
if (r) {
dev_err(&dssdev->dev, "failed to read Taal status\n");
goto err;
@@ -942,7 +1026,7 @@ static void taal_esd_work(struct work_struct *work)
if (td->use_ext_te && td->te_enabled)
taal_enable_te(dssdev, true);
- dsi_bus_unlock();
+ dsi_bus_unlock(lcd_ix);
queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
@@ -953,7 +1037,7 @@ err:
taal_disable(dssdev);
taal_enable(dssdev);
- dsi_bus_unlock();
+ dsi_bus_unlock(lcd_ix);
queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
}
@@ -978,7 +1062,32 @@ static struct omap_dss_driver taal_driver = {
.memory_read = taal_memory_read,
.driver = {
- .name = "taal",
+ .name = "panel-taal",
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct omap_dss_driver taal_driver2 = {
+ .probe = taal_probe,
+ .remove = taal_remove,
+
+ .enable = taal_enable,
+ .disable = taal_disable,
+ .suspend = taal_suspend,
+ .resume = taal_resume,
+
+ .setup_update = taal_setup_update,
+ .enable_te = taal_enable_te,
+ .wait_for_te = taal_wait_te,
+ .set_rotate = taal_rotate,
+ .get_rotate = taal_get_rotate,
+ .set_mirror = taal_mirror,
+ .get_mirror = taal_get_mirror,
+ .run_test = taal_run_test,
+ .memory_read = taal_memory_read,
+
+ .driver = {
+ .name = "panel-taal2",
.owner = THIS_MODULE,
},
};
@@ -986,13 +1095,17 @@ static struct omap_dss_driver taal_driver = {
static int __init taal_init(void)
{
omap_dss_register_driver(&taal_driver);
-
+ if (cpu_is_omap44xx())
+ omap_dss_register_driver(&taal_driver2);
+ printk(KERN_INFO "omap_dss_register_driver done\n");
return 0;
}
static void __exit taal_exit(void)
{
omap_dss_unregister_driver(&taal_driver);
+ if (cpu_is_omap44xx())
+ omap_dss_unregister_driver(&taal_driver2);
}
module_init(taal_init);
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 71d8dec30635..682bad347c83 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -1,8 +1,9 @@
menuconfig OMAP2_DSS
tristate "OMAP2/3 Display Subsystem support (EXPERIMENTAL)"
- depends on ARCH_OMAP2 || ARCH_OMAP3
- help
- OMAP2/3 Display Subsystem support.
+ depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4
+ default y
+ help
+ OMAP2/3/4 Display Subsystem support.
if OMAP2_DSS
@@ -37,6 +38,12 @@ config OMAP2_DSS_VENC
help
OMAP Video Encoder support.
+config OMAP2_DSS_HDMI
+ bool "HDMI support"
+ default n
+ help
+ OMAP HDMI panel support.
+
config OMAP2_DSS_SDI
bool "SDI support"
depends on ARCH_OMAP3
@@ -46,7 +53,7 @@ config OMAP2_DSS_SDI
config OMAP2_DSS_DSI
bool "DSI support"
- depends on ARCH_OMAP3
+ depends on ARCH_OMAP3 || ARCH_OMAP4
default n
help
MIPI DSI support.
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 980c72c2db98..521b754da6fb 100644..100755
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -4,3 +4,4 @@ omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
+omapdss-$(CONFIG_OMAP2_DSS_HDMI) += hdmi.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 29497a0c9a91..582cad6f83f7 100644..100755
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -59,10 +59,13 @@ module_param_named(def_disp, def_disp_name, charp, 0);
MODULE_PARM_DESC(def_disp_name, "default display name");
#ifdef DEBUG
-unsigned int dss_debug;
+unsigned int dss_debug = 1;
module_param_named(debug, dss_debug, bool, 0644);
#endif
+static int hdmi_code = 16;
+module_param_named(hdmicode, hdmi_code, int, 0644);
+
/* CONTEXT */
static int dss_get_ctx_id(void)
{
@@ -227,19 +230,33 @@ static void dss_put_clocks(void)
unsigned long dss_clk_get_rate(enum dss_clock clk)
{
- switch (clk) {
- case DSS_CLK_ICK:
- return clk_get_rate(core.dss_ick);
- case DSS_CLK_FCK1:
- return clk_get_rate(core.dss1_fck);
- case DSS_CLK_FCK2:
- return clk_get_rate(core.dss2_fck);
- case DSS_CLK_54M:
- return clk_get_rate(core.dss_54m_fck);
- case DSS_CLK_96M:
- return clk_get_rate(core.dss_96m_fck);
+ if (!cpu_is_omap44xx()) {
+ switch (clk) {
+ case DSS_CLK_ICK:
+ return clk_get_rate(core.dss_ick);
+ case DSS_CLK_FCK1:
+ return clk_get_rate(core.dss1_fck);
+ case DSS_CLK_FCK2:
+ return clk_get_rate(core.dss2_fck);
+ case DSS_CLK_54M:
+ return clk_get_rate(core.dss_54m_fck);
+ case DSS_CLK_96M:
+ return clk_get_rate(core.dss_96m_fck);
+ }
+ } else {
+ switch (clk) {
+ case DSS_CLK_ICK:
+ return 166000000;
+ case DSS_CLK_FCK1:
+ return 153600000;
+ case DSS_CLK_FCK2:
+ return 0;
+ case DSS_CLK_54M:
+ return 54000000;
+ case DSS_CLK_96M:
+ return 96000000;
+ }
}
-
BUG();
return 0;
}
@@ -286,6 +303,8 @@ void dss_clk_enable(enum dss_clock clks)
if (cpu_is_omap34xx() && dss_need_ctx_restore())
restore_all_ctx();
+ else if (cpu_is_omap44xx() && dss_need_ctx_restore())
+ restore_all_ctx();
}
static void dss_clk_disable_no_ctx(enum dss_clock clks)
@@ -325,7 +344,7 @@ static void dss_clk_enable_all_no_ctx(void)
enum dss_clock clks;
clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
clks |= DSS_CLK_96M;
dss_clk_enable_no_ctx(clks);
}
@@ -335,7 +354,7 @@ static void dss_clk_disable_all_no_ctx(void)
enum dss_clock clks;
clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
clks |= DSS_CLK_96M;
dss_clk_disable_no_ctx(clks);
}
@@ -345,7 +364,7 @@ static void dss_clk_disable_all(void)
enum dss_clock clks;
clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
clks |= DSS_CLK_96M;
dss_clk_disable(clks);
}
@@ -358,7 +377,8 @@ static void dss_debug_dump_clocks(struct seq_file *s)
dss_dump_clocks(s);
dispc_dump_clocks(s);
#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_dump_clocks(s);
+ dsi_dump_clocks(dsi1, s);
+ dsi_dump_clocks(dsi2, s);
#endif
}
@@ -434,9 +454,15 @@ static int omap_dss_probe(struct platform_device *pdev)
dss_init_overlay_managers(pdev);
dss_init_overlays(pdev);
- r = dss_get_clocks();
- if (r)
- goto fail0;
+ /*
+ * FIX-ME: Replace with correct clk node when clk
+ * framework is available
+ */
+ if (!cpu_is_omap44xx()) {
+ r = dss_get_clocks();
+ if (r)
+ goto fail0;
+ }
dss_clk_enable_all_no_ctx();
@@ -489,15 +515,31 @@ static int omap_dss_probe(struct platform_device *pdev)
goto fail0;
}
#endif
+ }
#ifdef CONFIG_OMAP2_DSS_DSI
+ printk(KERN_INFO "dsi_init calling");
r = dsi_init(pdev);
if (r) {
DSSERR("Failed to initialize DSI\n");
goto fail0;
}
+ if (cpu_is_omap44xx()) {
+ printk(KERN_INFO "dsi2_init calling");
+ r = dsi2_init(pdev);
+ if (r) {
+ DSSERR("Failed to initialize DSI2\n");
+ goto fail0;
+ }
+ }
#endif
- }
+#ifdef CONFIG_OMAP2_DSS_HDMI
+ r = hdmi_init(pdev, hdmi_code);
+ if (r) {
+ DSSERR("Failed to initialize hdmi\n");
+ goto fail0;
+ }
+#endif
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
r = dss_initialize_debugfs();
if (r)
@@ -537,6 +579,9 @@ static int omap_dss_remove(struct platform_device *pdev)
#ifdef CONFIG_OMAP2_DSS_VENC
venc_exit();
#endif
+#ifdef CONFIG_OMAP2_DSS_HDMI
+ hdmi_exit();
+#endif
dispc_exit();
dpi_exit();
#ifdef CONFIG_OMAP2_DSS_RFBI
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 6dabf4b2f005..3831a069929a 100644..100755
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -38,9 +38,20 @@
#include <plat/display.h>
#include "dss.h"
-
-/* DISPC */
-#define DISPC_BASE 0x48050400
+#include <mach/tiler.h>
+#include"../../../media/video/tiler/tiler.h"
+
+#ifndef CONFIG_ARCH_OMAP4
+ /* DSS */
+ #define DSS_BASE 0x48050000
+ /* DISPLAY CONTROLLER */
+ #define DISPC_BASE 0x48050400
+#else
+ /* DSS */
+ #define DSS_BASE 0x58000000
+ /* DISPLAY CONTROLLER */
+ #define DISPC_BASE 0x58001000
+#endif
#define DISPC_SZ_REGS SZ_1K
@@ -55,7 +66,9 @@ struct dispc_reg { u16 idx; };
#define DISPC_IRQSTATUS DISPC_REG(0x0018)
#define DISPC_IRQENABLE DISPC_REG(0x001C)
#define DISPC_CONTROL DISPC_REG(0x0040)
+ /* DISPC_CONTROL1 in OMAP4 */
#define DISPC_CONFIG DISPC_REG(0x0044)
+ /* DISPC_CONFIG1 in OMAP4 */
#define DISPC_CAPABLE DISPC_REG(0x0048)
#define DISPC_DEFAULT_COLOR0 DISPC_REG(0x004C)
#define DISPC_DEFAULT_COLOR1 DISPC_REG(0x0050)
@@ -64,12 +77,28 @@ struct dispc_reg { u16 idx; };
#define DISPC_LINE_STATUS DISPC_REG(0x005C)
#define DISPC_LINE_NUMBER DISPC_REG(0x0060)
#define DISPC_TIMING_H DISPC_REG(0x0064)
+ /* DISPC_TIMING_H1 in OMAP4 */
#define DISPC_TIMING_V DISPC_REG(0x0068)
+ /* DISPC_TIMING_V1 in OMAP4 */
#define DISPC_POL_FREQ DISPC_REG(0x006C)
+ /* DISPC_POL_FREQ1 in OMAP4 */
+
+#ifndef CONFIG_ARCH_OMAP4
#define DISPC_DIVISOR DISPC_REG(0x0070)
+#else
+#define DISPC_DIVISOR DISPC_REG(0x0804)
+#define DISPC_DIVISOR1 DISPC_REG(0x0070)
+#endif
+
#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074)
#define DISPC_SIZE_DIG DISPC_REG(0x0078)
+ /* DISPC_SIZE_TV in OMAP4 */
#define DISPC_SIZE_LCD DISPC_REG(0x007C)
+ /* DISPC_SIZE_LCD1 in OMAP4 */
+
+#ifdef CONFIG_ARCH_OMAP4
+#define DISPC_GLOBAL_BUFFER DISPC_REG(0x0800)
+#endif
/* DISPC GFX plane */
#define DISPC_GFX_BA0 DISPC_REG(0x0080)
@@ -78,19 +107,27 @@ struct dispc_reg { u16 idx; };
#define DISPC_GFX_SIZE DISPC_REG(0x008C)
#define DISPC_GFX_ATTRIBUTES DISPC_REG(0x00A0)
#define DISPC_GFX_FIFO_THRESHOLD DISPC_REG(0x00A4)
+ /* DISPC_GFX_BUF_THRESHOLD in OMAP4 */
#define DISPC_GFX_FIFO_SIZE_STATUS DISPC_REG(0x00A8)
+ /* DISPC_GFX_BUF_SIZE_STATUS in OMAP4 */
#define DISPC_GFX_ROW_INC DISPC_REG(0x00AC)
#define DISPC_GFX_PIXEL_INC DISPC_REG(0x00B0)
-#define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4)
+#define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4)
#define DISPC_GFX_TABLE_BA DISPC_REG(0x00B8)
#define DISPC_DATA_CYCLE1 DISPC_REG(0x01D4)
+ /* DISPC_DATA1_CYCLE1 in OMAP4 */
#define DISPC_DATA_CYCLE2 DISPC_REG(0x01D8)
+ /* DISPC_DATA1_CYCLE2 in OMAP4 */
#define DISPC_DATA_CYCLE3 DISPC_REG(0x01DC)
+ /* DISPC_DATA1_CYCLE3 in OMAP4 */
#define DISPC_CPR_COEF_R DISPC_REG(0x0220)
+ /* DISPC_CPR1_COEFF_R in OMAP4 */
#define DISPC_CPR_COEF_G DISPC_REG(0x0224)
+ /* DISPC_CPR1_COEFF_G in OMAP4 */
#define DISPC_CPR_COEF_B DISPC_REG(0x0228)
+ /* DISPC_CPR1_COEFF_B in OMAP4 */
#define DISPC_GFX_PRELOAD DISPC_REG(0x022C)
@@ -99,6 +136,7 @@ struct dispc_reg { u16 idx; };
#define DISPC_VID_BA0(n) DISPC_VID_REG(n, 0x0000)
#define DISPC_VID_BA1(n) DISPC_VID_REG(n, 0x0004)
+
#define DISPC_VID_POSITION(n) DISPC_VID_REG(n, 0x0008)
#define DISPC_VID_SIZE(n) DISPC_VID_REG(n, 0x000C)
#define DISPC_VID_ATTRIBUTES(n) DISPC_VID_REG(n, 0x0010)
@@ -122,12 +160,131 @@ struct dispc_reg { u16 idx; };
#define DISPC_VID_PRELOAD(n) DISPC_REG(0x230 + (n)*0x04)
+#ifdef CONFIG_ARCH_OMAP4
+
+#define DISPC_CONTROL2 DISPC_REG(0x0238)
+
+/******** registers related to VID3 and WB pipelines ****/
+/* DISPC Video plane, n = 0 for VID3, n = 1 for WB _VID_V3_WB_ */
+#define DISPC_VID_V3_WB_REG(n, idx) DISPC_REG(0x0300 + (n)*0x200 + idx)
+
+#define DISPC_VID_V3_WB_ACCU0(n) DISPC_VID_V3_WB_REG(n, 0x0000)
+#define DISPC_VID_V3_WB_ACCU1(n) DISPC_VID_V3_WB_REG(n, 0x0004)
+
+#define DISPC_VID_V3_WB_BA0(n) DISPC_VID_V3_WB_REG(n, 0x0008)
+#define DISPC_VID_V3_WB_BA1(n) DISPC_VID_V3_WB_REG(n, 0x000C)
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_V3_WB_FIR_COEF_H(n, i) DISPC_REG(0x0310+(n)*0x200+(i)*0x8)
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_V3_WB_FIR_COEF_HV(n, i) DISPC_REG(0x0314+(n)*0x200+(i)*0x8)
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_V3_WB_FIR_COEF_V(n, i) DISPC_REG(0x0350+(n)*0x200+(i)*0x4)
+
+#define DISPC_VID_V3_WB_ATTRIBUTES(n) DISPC_VID_V3_WB_REG(n, 0x0070)
+
+/* coef index i = {0, 1, 2, 3, 4} */
+#define DISPC_VID_V3_WB_CONV_COEF(n, i) DISPC_REG(0x0374 + (n)*0x200 + (i)*0x4)
+
+#define DISPC_VID_V3_WB_BUF_SIZE_STATUS(n) DISPC_VID_V3_WB_REG(n, 0x0088)
+#define DISPC_VID_V3_WB_BUF_THRESHOLD(n) DISPC_VID_V3_WB_REG(n, 0x008C)
+#define DISPC_VID_V3_WB_FIR(n) DISPC_VID_V3_WB_REG(n, 0x0090)
+#define DISPC_VID_V3_WB_PICTURE_SIZE(n) DISPC_VID_V3_WB_REG(n, 0x0094)
+#define DISPC_VID_V3_WB_PIXEL_INC(n) DISPC_VID_V3_WB_REG(n, 0x0098)
+
+#define DISPC_VID_VID3_POSITION DISPC_REG(0x039C)
+#define DISPC_VID_VID3_PRELOAD DISPC_REG(0x03A0)
+
+#define DISPC_VID_V3_WB_ROW_INC(n) DISPC_VID_V3_WB_REG(n, 0x00A4)
+#define DISPC_VID_V3_WB_SIZE(n) DISPC_VID_V3_WB_REG(n, 0x00A8)
+
+#define DISPC_VID_V3_WB_FIR2(n) DISPC_REG(0x0724 + (n)*0x6C)
+ /* n=0: VID3, n=1: WB*/
+
+#define DISPC_VID_V3_WB_ACCU2_0(n) DISPC_REG(0x0728 + (n)*0x6C)
+ /* n=0: VID3, n=1: WB*/
+#define DISPC_VID_V3_WB_ACCU2_1(n) DISPC_REG(0x072C + (n)*0x6C)
+ /* n=0: VID3, n=1: WB*/
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} n=0: VID3, n=1: WB */
+#define DISPC_VID_V3_WB_FIR_COEF_H2(n, i) DISPC_REG(0x0730+(n)*0x6C+(i)*0x8)
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_V3_WB_FIR_COEF_HV2(n, i) DISPC_REG(0x0734+(n)*0x6C+(i)*0x8)
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_V3_WB_FIR_COEF_V2(n, i) DISPC_REG(0x0770+(n)*0x6C+(i)*0x4)
+
+
+/*********End Vid3 and WB Registers ***************/
+
+/********** OMAP4 new global registers **/
+#define DISPC_DEFAULT_COLOR2 DISPC_REG(0x03AC)
+#define DISPC_TRANS_COLOR2 DISPC_REG(0x03B0)
+#define DISPC_CPR2_COEF_B DISPC_REG(0x03B4)
+#define DISPC_CPR2_COEF_G DISPC_REG(0x03B8)
+#define DISPC_CPR2_COEF_R DISPC_REG(0x03BC)
+#define DISPC_DATA2_CYCLE1 DISPC_REG(0x03C0)
+#define DISPC_DATA2_CYCLE2 DISPC_REG(0x03C4)
+#define DISPC_DATA2_CYCLE3 DISPC_REG(0x03C8)
+#define DISPC_SIZE_LCD2 DISPC_REG(0x03CC)
+#define DISPC_TIMING_H2 DISPC_REG(0x0400)
+#define DISPC_TIMING_V2 DISPC_REG(0x0404)
+#define DISPC_POL_FREQ2 DISPC_REG(0x0408)
+#define DISPC_DIVISOR2 DISPC_REG(0x040C)
+
+
+
+/* DISPC Video plane:
+ * n = 0 for VID1, n = 1 for VID2,
+ * n = 2 for VID3, n = 3 for WB
+ */
+#define DISPC_VID_OMAP4_REG(n, idx) DISPC_REG(0x0600 + (n)*0x04 + idx)
+
+#define DISPC_VID_BA_UV0(n) DISPC_VID_OMAP4_REG((n)*2, 0x0000)
+#define DISPC_VID_BA_UV1(n) DISPC_VID_OMAP4_REG((n)*2, 0x0004)
+
+#define DISPC_CONFIG2 DISPC_REG(0x0620)
+
+#define DISPC_VID_ATTRIBUTES2(n) DISPC_VID_OMAP4_REG(n, 0x0024)
+ /* n = {0,1,2,3} */
+#define DISPC_GAMMA_TABLE(n) DISPC_VID_OMAP4_REG(n, 0x0030)
+ /* n = {0,1,2,3} */
+
+/* VID1/VID2 specific new registers */
+#define DISPC_VID_FIR2(n) DISPC_REG(0x063C + (n)*0x6C)
+ /* n=0: VID1, n=1: VID2*/
+
+#define DISPC_VID_ACCU2_0(n) DISPC_REG(0x0640 + (n)*0x6C)
+ /* n=0: VID1, n=1: VID2*/
+#define DISPC_VID_ACCU2_1(n) DISPC_REG(0x0644 + (n)*0x6C)
+ /* n=0: VID1, n=1: VID2*/
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} n=0: VID1, n=1: VID2 */
+#define DISPC_VID_FIR_COEF_H2(n, i) DISPC_REG(0x0648 + (n)*0x6C + (i)*0x8)
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_FIR_COEF_HV2(n, i) DISPC_REG(0x064C + (n)*0x6C + (i)*0x8)
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_FIR_COEF_V2(n, i) DISPC_REG(0x0688 + (n)*0x6C + (i)*0x4)
+
+/*end of VID1/VID2 specific new registers*/
+
+
+#endif
+
+/* TODO: re-enable these once SYNC_LOST is handled after the LCD comes up:
+ *	DISPC_IRQ_SYNC_LOST |
+ *	DISPC_IRQ_VID1_FIFO_UNDERFLOW |
+ *	DISPC_IRQ_VID2_FIFO_UNDERFLOW
+ */
#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
DISPC_IRQ_OCP_ERR | \
- DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
- DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
- DISPC_IRQ_SYNC_LOST | \
DISPC_IRQ_SYNC_LOST_DIGIT)
#define DISPC_MAX_NR_ISRS 8
@@ -146,7 +303,12 @@ struct omap_dispc_isr_data {
static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES,
DISPC_VID_ATTRIBUTES(0),
- DISPC_VID_ATTRIBUTES(1) };
+ DISPC_VID_ATTRIBUTES(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_ATTRIBUTES(0)/* VID 3 pipeline */
+#endif
+
+};
static struct {
void __iomem *base;
@@ -161,6 +323,7 @@ static struct {
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
} dispc;
+extern void __iomem *dispc_base;
static void _omap_dispc_set_irqs(void);
@@ -174,6 +337,26 @@ static inline u32 dispc_read_reg(const struct dispc_reg idx)
return __raw_readl(dispc.base + idx.idx);
}
+static inline u8 calc_tiler_orientation(u8 rotation, u8 mir)
+{
+ u8 orientation = 0;
+ switch (rotation) {
+ case 0:
+ orientation = (mir ? 0x2 : 0x0);
+ break;
+ case 1:
+ orientation = (mir ? 0x7 : 0x6);
+ break;
+ case 2:
+ orientation = (mir ? 0x1 : 0x3);
+ break;
+ case 3:
+ orientation = (mir ? 0x4 : 0x5);
+ break;
+ }
+ return orientation;
+}
+
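For reference while reading the register accesses below: the FLD_VAL/FLD_GET/FLD_MOD and REG_GET/REG_FLD_MOD helpers used throughout dispc.c come from dss.h and are not part of this diff. A minimal sketch of their assumed shape (start is the high bit of the field, end the low bit):

    #define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
    #define FLD_VAL(val, start, end)	(((val) << (end)) & FLD_MASK(start, end))
    #define FLD_GET(val, start, end)	(((val) & FLD_MASK(start, end)) >> (end))
    #define FLD_MOD(orig, val, start, end) \
    	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
    #define REG_GET(idx, start, end) \
    	FLD_GET(dispc_read_reg(idx), start, end)
    #define REG_FLD_MOD(idx, val, start, end) \
    	dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))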
#define SR(reg) \
dispc.ctx[(DISPC_##reg).idx / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
@@ -222,6 +405,10 @@ void dispc_save_context(void)
SR(GFX_PRELOAD);
+#ifdef CONFIG_ARCH_OMAP4
+/* TODO: find out what needs to be saved */
+#endif
+
/* VID1 */
SR(VID_BA0(0));
SR(VID_BA1(0));
@@ -340,6 +527,10 @@ void dispc_restore_context(void)
RR(SIZE_DIG);
RR(SIZE_LCD);
+#ifdef CONFIG_ARCH_OMAP4
+ /* TODO: find out what needs to be saved/restored */
+#endif
+
RR(GFX_BA0);
RR(GFX_BA1);
RR(GFX_POSITION);
@@ -477,13 +668,21 @@ static inline void enable_clocks(bool enable)
bool dispc_go_busy(enum omap_channel channel)
{
int bit;
-
+#ifdef CONFIG_ARCH_OMAP4
+ if (channel != OMAP_DSS_CHANNEL_DIGIT)
+#else
if (channel == OMAP_DSS_CHANNEL_LCD)
+#endif
bit = 5; /* GOLCD */
else
bit = 6; /* GODIGIT */
- return REG_GET(DISPC_CONTROL, bit, bit) == 1;
+#ifdef CONFIG_ARCH_OMAP4
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ return REG_GET(DISPC_CONTROL2, bit, bit) == 1;
+ else
+#endif
+ return REG_GET(DISPC_CONTROL, bit, bit) == 1;
}
void dispc_go(enum omap_channel channel)
@@ -491,21 +690,35 @@ void dispc_go(enum omap_channel channel)
int bit;
enable_clocks(1);
-
+#ifdef CONFIG_ARCH_OMAP4
+ if (channel != OMAP_DSS_CHANNEL_DIGIT)
+#else
if (channel == OMAP_DSS_CHANNEL_LCD)
+#endif
bit = 0; /* LCDENABLE */
else
bit = 1; /* DIGITALENABLE */
- /* if the channel is not enabled, we don't need GO */
- if (REG_GET(DISPC_CONTROL, bit, bit) == 0)
- goto end;
-
+#ifdef CONFIG_ARCH_OMAP4
+ if (channel == OMAP_DSS_CHANNEL_LCD2) {
+ if (REG_GET(DISPC_CONTROL2, bit, bit) == 0)
+ goto end;
+ } else
+#endif
+ {
+ if (REG_GET(DISPC_CONTROL, bit, bit) == 0)
+ goto end;
+ }
+#ifdef CONFIG_ARCH_OMAP4
+ if (channel != OMAP_DSS_CHANNEL_DIGIT)
+#else
if (channel == OMAP_DSS_CHANNEL_LCD)
+#endif
bit = 5; /* GOLCD */
else
bit = 6; /* GODIGIT */
+#ifndef CONFIG_ARCH_OMAP4
if (REG_GET(DISPC_CONTROL, bit, bit) == 1) {
DSSERR("GO bit not down for channel %d\n", channel);
goto end;
@@ -514,182 +727,127 @@ void dispc_go(enum omap_channel channel)
DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : "DIGIT");
REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
+#else
+ if (channel == OMAP_DSS_CHANNEL_LCD2) {
+ if (REG_GET(DISPC_CONTROL2, bit, bit) == 1) {
+ /* FIXME: PICO DLP on channel 2 needs the GO bit to stay up,
+ so report this with DSSDBG instead of DSSERR */
+ DSSDBG("GO bit not down for channel %d\n", channel);
+ goto end;
+ }
+ DSSDBG("GO LCD2\n");
+ REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
+ } else {
+ if (REG_GET(DISPC_CONTROL, bit, bit) == 1) {
+ DSSERR("GO bit not down for channel %d\n", channel);
+ goto end;
+ }
+
+ DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ?
+ "LCD" : "DIGIT");
+
+ REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
+ }
+#endif
+
end:
enable_clocks(0);
}
+
static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_write_reg(DISPC_VID_FIR_COEF_H(plane-1, reg), value);
+#ifdef CONFIG_ARCH_OMAP4
+ if ((OMAP_DSS_VIDEO1 == plane) || (OMAP_DSS_VIDEO2 == plane))
+#endif
+ dispc_write_reg(DISPC_VID_FIR_COEF_H(plane-1, reg), value);
+#ifdef CONFIG_ARCH_OMAP4
+ else if (OMAP_DSS_VIDEO3 == plane)
+ dispc_write_reg(DISPC_VID_V3_WB_FIR_COEF_H(0, reg), value);
+#endif
+
}
static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_write_reg(DISPC_VID_FIR_COEF_HV(plane-1, reg), value);
+#ifdef CONFIG_ARCH_OMAP4
+ if ((OMAP_DSS_VIDEO1 == plane) || (OMAP_DSS_VIDEO2 == plane))
+#endif
+ dispc_write_reg(DISPC_VID_FIR_COEF_HV(plane-1, reg), value);
+#ifdef CONFIG_ARCH_OMAP4
+ else if (OMAP_DSS_VIDEO3 == plane)
+ dispc_write_reg(DISPC_VID_V3_WB_FIR_COEF_HV(0, reg), value);
+#endif
}
static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_write_reg(DISPC_VID_FIR_COEF_V(plane-1, reg), value);
+#ifdef CONFIG_ARCH_OMAP4
+ if ((OMAP_DSS_VIDEO1 == plane) || (OMAP_DSS_VIDEO2 == plane))
+#endif
+
+ dispc_write_reg(DISPC_VID_FIR_COEF_V(plane-1, reg), value);
+#ifdef CONFIG_ARCH_OMAP4
+ else if (OMAP_DSS_VIDEO3 == plane)
+ dispc_write_reg(DISPC_VID_V3_WB_FIR_COEF_V(0, reg), value);
+#endif
}
-static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
- int vscaleup, int five_taps)
+#ifdef CONFIG_ARCH_OMAP4
+static void _dispc_write_firh2_reg(enum omap_plane plane, int reg, u32 value)
{
- /* Coefficients for horizontal up-sampling */
- static const u32 coef_hup[8] = {
- 0x00800000,
- 0x0D7CF800,
- 0x1E70F5FF,
- 0x335FF5FE,
- 0xF74949F7,
- 0xF55F33FB,
- 0xF5701EFE,
- 0xF87C0DFF,
- };
+ BUG_ON(plane == OMAP_DSS_GFX);
- /* Coefficients for horizontal down-sampling */
- static const u32 coef_hdown[8] = {
- 0x24382400,
- 0x28371FFE,
- 0x2C361BFB,
- 0x303516F9,
- 0x11343311,
- 0x1635300C,
- 0x1B362C08,
- 0x1F372804,
- };
+ if ((OMAP_DSS_VIDEO1 == plane) || (OMAP_DSS_VIDEO2 == plane))
+ dispc_write_reg(DISPC_VID_FIR_COEF_H2(plane-1, reg), value);
+ else if (OMAP_DSS_VIDEO3 == plane)
+ dispc_write_reg(DISPC_VID_V3_WB_FIR_COEF_H2(0, reg), value);
- /* Coefficients for horizontal and vertical up-sampling */
- static const u32 coef_hvup[2][8] = {
- {
- 0x00800000,
- 0x037B02FF,
- 0x0C6F05FE,
- 0x205907FB,
- 0x00404000,
- 0x075920FE,
- 0x056F0CFF,
- 0x027B0300,
- },
- {
- 0x00800000,
- 0x0D7CF8FF,
- 0x1E70F5FE,
- 0x335FF5FB,
- 0xF7404000,
- 0xF55F33FE,
- 0xF5701EFF,
- 0xF87C0D00,
- },
- };
+}
- /* Coefficients for horizontal and vertical down-sampling */
- static const u32 coef_hvdown[2][8] = {
- {
- 0x24382400,
- 0x28391F04,
- 0x2D381B08,
- 0x3237170C,
- 0x123737F7,
- 0x173732F9,
- 0x1B382DFB,
- 0x1F3928FE,
- },
- {
- 0x24382400,
- 0x28371F04,
- 0x2C361B08,
- 0x3035160C,
- 0x113433F7,
- 0x163530F9,
- 0x1B362CFB,
- 0x1F3728FE,
- },
- };
+static void _dispc_write_firhv2_reg(enum omap_plane plane, int reg, u32 value)
+{
+ BUG_ON(plane == OMAP_DSS_GFX);
- /* Coefficients for vertical up-sampling */
- static const u32 coef_vup[8] = {
- 0x00000000,
- 0x0000FF00,
- 0x0000FEFF,
- 0x0000FBFE,
- 0x000000F7,
- 0x0000FEFB,
- 0x0000FFFE,
- 0x000000FF,
- };
+ if ((OMAP_DSS_VIDEO1 == plane) || (OMAP_DSS_VIDEO2 == plane))
+ dispc_write_reg(DISPC_VID_FIR_COEF_HV2(plane-1, reg), value);
+ else if (OMAP_DSS_VIDEO3 == plane)
+ dispc_write_reg(DISPC_VID_V3_WB_FIR_COEF_HV2(0, reg), value);
+}
+static void _dispc_write_firv2_reg(enum omap_plane plane, int reg, u32 value)
+{
+ BUG_ON(plane == OMAP_DSS_GFX);
- /* Coefficients for vertical down-sampling */
- static const u32 coef_vdown[8] = {
- 0x00000000,
- 0x000004FE,
- 0x000008FB,
- 0x00000CF9,
- 0x0000F711,
- 0x0000F90C,
- 0x0000FB08,
- 0x0000FE04,
- };
+ if ((OMAP_DSS_VIDEO1 == plane) || (OMAP_DSS_VIDEO2 == plane))
+ dispc_write_reg(DISPC_VID_FIR_COEF_V2(plane-1, reg), value);
+ else if (OMAP_DSS_VIDEO3 == plane)
+ dispc_write_reg(DISPC_VID_V3_WB_FIR_COEF_V2(0, reg), value);
+}
+#endif
- const u32 *h_coef;
- const u32 *hv_coef;
- const u32 *hv_coef_mod;
- const u32 *v_coef;
+static void _dispc_set_scale_coef(enum omap_plane plane, const s8 *hfir,
+ const s8 *vfir, int three_taps)
+{
int i;
-
- if (hscaleup)
- h_coef = coef_hup;
- else
- h_coef = coef_hdown;
-
- if (vscaleup) {
- hv_coef = coef_hvup[five_taps];
- v_coef = coef_vup;
-
- if (hscaleup)
- hv_coef_mod = NULL;
- else
- hv_coef_mod = coef_hvdown[five_taps];
- } else {
- hv_coef = coef_hvdown[five_taps];
- v_coef = coef_vdown;
-
- if (hscaleup)
- hv_coef_mod = coef_hvup[five_taps];
- else
- hv_coef_mod = NULL;
- }
-
- for (i = 0; i < 8; i++) {
- u32 h, hv;
-
- h = h_coef[i];
-
- hv = hv_coef[i];
-
- if (hv_coef_mod) {
- hv &= 0xffffff00;
- hv |= (hv_coef_mod[i] & 0xff);
- }
+ for (i = 0; i < 8; i++, hfir++, vfir++) {
+ u32 h, hv, v;
+ h = ((hfir[0] & 0xFF) | ((hfir[8] << 8) & 0xFF00) |
+ ((hfir[16] << 16) & 0xFF0000) |
+ ((hfir[24] << 24) & 0xFF000000));
+ hv = ((hfir[32] & 0xFF) | ((vfir[8] << 8) & 0xFF00) |
+ ((vfir[16] << 16) & 0xFF0000) |
+ ((vfir[24] << 24) & 0xFF000000));
+ v = ((vfir[0] & 0xFF) | ((vfir[32] << 8) & 0xFF00));
_dispc_write_firh_reg(plane, i, h);
_dispc_write_firhv_reg(plane, i, hv);
- }
-
- if (!five_taps)
- return;
-
- for (i = 0; i < 8; i++) {
- u32 v;
- v = v_coef[i];
_dispc_write_firv_reg(plane, i, v);
}
}
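The hfir/vfir tables consumed here (fir5_zero through fir5_m32, defined further down in this hunk) are laid out tap-major: 5 taps by 8 phases, so tap t of phase i sits at index t*8 + i, which is why the loop reads hfir[0], hfir[8], hfir[16], hfir[24] and hfir[32] for a single phase. A small accessor makes the layout explicit (illustrative sketch, not part of the patch):

    /* tap t (0..4) of phase i (0..7) in a 40-entry, tap-major table */
    static inline s8 fir_coef(const s8 *coef, int tap, int phase)
    {
    	return coef[tap * 8 + phase];
    }

Per phase, horizontal taps 0..3 are packed one per byte into FIR_COEF_H, horizontal tap 4 plus vertical taps 1..3 into FIR_COEF_HV, and vertical taps 0 and 4 into FIR_COEF_V.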
@@ -721,6 +879,21 @@ static void _dispc_setup_color_conv_coef(void)
dispc_write_reg(DISPC_VID_CONV_COEF(1, 3), CVAL(ct->bcr, ct->by));
dispc_write_reg(DISPC_VID_CONV_COEF(1, 4), CVAL(0, ct->bcb));
+#ifdef CONFIG_ARCH_OMAP4
+ dispc_write_reg(DISPC_VID_V3_WB_CONV_COEF(0, 0),
+ CVAL(ct->rcr, ct->ry));
+ dispc_write_reg(DISPC_VID_V3_WB_CONV_COEF(0, 1),
+ CVAL(ct->gy, ct->rcb));
+ dispc_write_reg(DISPC_VID_V3_WB_CONV_COEF(0, 2),
+ CVAL(ct->gcb, ct->gcr));
+ dispc_write_reg(DISPC_VID_V3_WB_CONV_COEF(0, 3),
+ CVAL(ct->bcr, ct->by));
+ dispc_write_reg(DISPC_VID_V3_WB_CONV_COEF(0, 4),
+ CVAL(0, ct->bcb));
+
+ REG_FLD_MOD(DISPC_VID_V3_WB_ATTRIBUTES(0), ct->full_range, 11, 11);
+#endif
+
#undef CVAL
REG_FLD_MOD(DISPC_VID_ATTRIBUTES(0), ct->full_range, 11, 11);
@@ -732,7 +905,11 @@ static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr)
{
const struct dispc_reg ba0_reg[] = { DISPC_GFX_BA0,
DISPC_VID_BA0(0),
- DISPC_VID_BA0(1) };
+ DISPC_VID_BA0(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_BA0(0) /* VID 3 pipeline*/
+#endif
+ };
dispc_write_reg(ba0_reg[plane], paddr);
}
@@ -741,16 +918,56 @@ static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr)
{
const struct dispc_reg ba1_reg[] = { DISPC_GFX_BA1,
DISPC_VID_BA1(0),
- DISPC_VID_BA1(1) };
+ DISPC_VID_BA1(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_BA1(0) /* VID 3 pipeline*/
+#endif
+
+ };
dispc_write_reg(ba1_reg[plane], paddr);
}
+#ifdef CONFIG_ARCH_OMAP4
+static void _dispc_set_plane_ba_uv0(enum omap_plane plane, u32 paddr)
+{
+ const struct dispc_reg ba_uv0_reg[] = { DISPC_VID_BA_UV0(0),
+ DISPC_VID_BA_UV0(1),
+ DISPC_VID_BA_UV0(2) /* VID 3 pipeline*/
+ };
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ dispc_write_reg(ba_uv0_reg[plane - 1], paddr);
+ /* plane - 1 => no UV_BA for GFX*/
+
+}
+
+static void _dispc_set_plane_ba_uv1(enum omap_plane plane, u32 paddr)
+{
+ const struct dispc_reg ba_uv1_reg[] = { DISPC_VID_BA_UV1(0),
+ DISPC_VID_BA_UV1(1),
+ DISPC_VID_BA_UV1(2)
+ };
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ dispc_write_reg(ba_uv1_reg[plane - 1], paddr);
+ /* plane - 1 => no UV_BA for GFX*/
+}
+
+#endif
+
static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y)
{
const struct dispc_reg pos_reg[] = { DISPC_GFX_POSITION,
DISPC_VID_POSITION(0),
- DISPC_VID_POSITION(1) };
+ DISPC_VID_POSITION(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_VID3_POSITION /* VID 3 pipeline*/
+#endif
+
+ };
u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
dispc_write_reg(pos_reg[plane], val);
@@ -760,7 +977,12 @@ static void _dispc_set_pic_size(enum omap_plane plane, int width, int height)
{
const struct dispc_reg siz_reg[] = { DISPC_GFX_SIZE,
DISPC_VID_PICTURE_SIZE(0),
- DISPC_VID_PICTURE_SIZE(1) };
+ DISPC_VID_PICTURE_SIZE(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_PICTURE_SIZE(0) /* VID 3 pipeline*/
+#endif
+
+ };
u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
dispc_write_reg(siz_reg[plane], val);
}
@@ -769,7 +991,12 @@ static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
{
u32 val;
const struct dispc_reg vsi_reg[] = { DISPC_VID_SIZE(0),
- DISPC_VID_SIZE(1) };
+ DISPC_VID_SIZE(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_SIZE(0) /* VID 3 pipeline*/
+#endif
+
+ };
BUG_ON(plane == OMAP_DSS_GFX);
@@ -780,8 +1007,9 @@ static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
{
+#ifndef CONFIG_ARCH_OMAP4 /* all pipelines have alpha coeff in OMAP4 */
BUG_ON(plane == OMAP_DSS_VIDEO1);
-
+#endif
if (cpu_is_omap24xx())
return;
@@ -789,13 +1017,25 @@ static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0);
else if (plane == OMAP_DSS_VIDEO2)
REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 23, 16);
+#ifdef CONFIG_ARCH_OMAP4
+ else if (plane == OMAP_DSS_VIDEO1)
+ REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 15, 8);
+ else if (plane == OMAP_DSS_VIDEO3)
+ REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 31, 24);
+#endif
+
}
static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc)
{
const struct dispc_reg ri_reg[] = { DISPC_GFX_PIXEL_INC,
DISPC_VID_PIXEL_INC(0),
- DISPC_VID_PIXEL_INC(1) };
+ DISPC_VID_PIXEL_INC(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_PIXEL_INC(0) /* VID 3 pipeline*/
+#endif
+
+ };
dispc_write_reg(ri_reg[plane], inc);
}
@@ -804,7 +1044,12 @@ static void _dispc_set_row_inc(enum omap_plane plane, s32 inc)
{
const struct dispc_reg ri_reg[] = { DISPC_GFX_ROW_INC,
DISPC_VID_ROW_INC(0),
- DISPC_VID_ROW_INC(1) };
+ DISPC_VID_ROW_INC(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_ROW_INC(0) /* VID 3 pipeline*/
+#endif
+
+ };
dispc_write_reg(ri_reg[plane], inc);
}
@@ -814,39 +1059,80 @@ static void _dispc_set_color_mode(enum omap_plane plane,
{
u32 m = 0;
- switch (color_mode) {
- case OMAP_DSS_COLOR_CLUT1:
- m = 0x0; break;
- case OMAP_DSS_COLOR_CLUT2:
- m = 0x1; break;
- case OMAP_DSS_COLOR_CLUT4:
- m = 0x2; break;
- case OMAP_DSS_COLOR_CLUT8:
- m = 0x3; break;
- case OMAP_DSS_COLOR_RGB12U:
- m = 0x4; break;
- case OMAP_DSS_COLOR_ARGB16:
- m = 0x5; break;
- case OMAP_DSS_COLOR_RGB16:
- m = 0x6; break;
- case OMAP_DSS_COLOR_RGB24U:
- m = 0x8; break;
- case OMAP_DSS_COLOR_RGB24P:
- m = 0x9; break;
- case OMAP_DSS_COLOR_YUV2:
- m = 0xa; break;
- case OMAP_DSS_COLOR_UYVY:
- m = 0xb; break;
- case OMAP_DSS_COLOR_ARGB32:
- m = 0xc; break;
- case OMAP_DSS_COLOR_RGBA32:
- m = 0xd; break;
- case OMAP_DSS_COLOR_RGBX32:
- m = 0xe; break;
- default:
- BUG(); break;
- }
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_GFX == plane) {
+#endif
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_CLUT1:
+ m = 0x0; break;
+ case OMAP_DSS_COLOR_CLUT2:
+ m = 0x1; break;
+ case OMAP_DSS_COLOR_CLUT4:
+ m = 0x2; break;
+ case OMAP_DSS_COLOR_CLUT8:
+ m = 0x3; break;
+ case OMAP_DSS_COLOR_RGB12U:
+ m = 0x4; break;
+ case OMAP_DSS_COLOR_ARGB16:
+ m = 0x5; break;
+ case OMAP_DSS_COLOR_RGB16:
+ m = 0x6; break;
+ case OMAP_DSS_COLOR_RGB24U:
+ m = 0x8; break;
+ case OMAP_DSS_COLOR_RGB24P:
+ m = 0x9; break;
+ case OMAP_DSS_COLOR_YUV2:
+ m = 0xa; break;
+ case OMAP_DSS_COLOR_UYVY:
+ m = 0xb; break;
+ case OMAP_DSS_COLOR_ARGB32:
+ m = 0xc; break;
+ case OMAP_DSS_COLOR_RGBA32:
+ m = 0xd; break;
+ case OMAP_DSS_COLOR_RGBX32:
+ m = 0xe; break;
+ default:
+ BUG(); break;
+ }
+#ifdef CONFIG_ARCH_OMAP4
+ } else {
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_NV12:
+ m = 0x0; break;
+ case OMAP_DSS_COLOR_RGB12U:
+ m = 0x1; break;
+ case OMAP_DSS_COLOR_RGBA12:
+ m = 0x2; break;
+ case OMAP_DSS_COLOR_XRGB12:
+ m = 0x4; break;
+ case OMAP_DSS_COLOR_ARGB16:
+ m = 0x5; break;
+ case OMAP_DSS_COLOR_RGB16:
+ m = 0x6; break;
+ case OMAP_DSS_COLOR_ARGB16_1555:
+ m = 0x7; break;
+ case OMAP_DSS_COLOR_RGB24U:
+ m = 0x8; break;
+ case OMAP_DSS_COLOR_RGB24P:
+ m = 0x9; break;
+ case OMAP_DSS_COLOR_YUV2:
+ m = 0xA; break;
+ case OMAP_DSS_COLOR_UYVY:
+ m = 0xB; break;
+ case OMAP_DSS_COLOR_ARGB32:
+ m = 0xC; break;
+ case OMAP_DSS_COLOR_RGBA32:
+ m = 0xD; break;
+ case OMAP_DSS_COLOR_RGBX24_32_ALGN:
+ m = 0xE; break;
+ case OMAP_DSS_COLOR_XRGB15:
+ m = 0xF; break;
+ default:
+ BUG(); break;
+ }
+ }
+#endif
REG_FLD_MOD(dispc_reg_att[plane], m, 4, 1);
}
@@ -855,6 +1141,9 @@ static void _dispc_set_channel_out(enum omap_plane plane,
{
int shift;
u32 val;
+#ifdef CONFIG_ARCH_OMAP4
+ int chan = 0, chan2 = 0;
+#endif
switch (plane) {
case OMAP_DSS_GFX:
@@ -862,6 +1151,9 @@ static void _dispc_set_channel_out(enum omap_plane plane,
break;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+#ifdef CONFIG_ARCH_OMAP4
+ case OMAP_DSS_VIDEO3: /* VID 3 pipeline*/
+#endif
shift = 16;
break;
default:
@@ -870,7 +1162,20 @@ static void _dispc_set_channel_out(enum omap_plane plane,
}
val = dispc_read_reg(dispc_reg_att[plane]);
+#ifdef CONFIG_ARCH_OMAP4
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ chan = 0; chan2 = 0; break;
+ case OMAP_DSS_CHANNEL_DIGIT:
+ chan = 1; chan2 = 0; break;
+ case OMAP_DSS_CHANNEL_LCD2:
+ chan = 0; chan2 = 1; break;
+ }
+ val = FLD_MOD(val, chan, shift, shift);
+ val = FLD_MOD(val, chan2, 31, 30);
+#else
val = FLD_MOD(val, channel, shift, shift);
+#endif
dispc_write_reg(dispc_reg_att[plane], val);
}
@@ -888,6 +1193,9 @@ void dispc_set_burst_size(enum omap_plane plane,
break;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+#ifdef CONFIG_ARCH_OMAP4
+ case OMAP_DSS_VIDEO3: /* VID 3 pipeline*/
+#endif
shift = 14;
break;
default:
@@ -902,6 +1210,66 @@ void dispc_set_burst_size(enum omap_plane plane,
enable_clocks(0);
}
+#ifdef CONFIG_ARCH_OMAP4
+void dispc_set_zorder(enum omap_plane plane,
+ enum omap_overlay_zorder zorder)
+{
+ u32 val;
+
+ val = dispc_read_reg(dispc_reg_att[plane]);
+ val = FLD_MOD(val, zorder, 27, 26);
+ dispc_write_reg(dispc_reg_att[plane], val);
+
+}
+void dispc_enable_zorder(enum omap_plane plane, bool enable)
+{
+ u32 val;
+
+ val = dispc_read_reg(dispc_reg_att[plane]);
+ val = FLD_MOD(val, enable, 25, 25);
+ dispc_write_reg(dispc_reg_att[plane], val);
+
+}
+/*
+ * This routine collects some fine-tuned settings from SiVal testing;
+ * it needs to be revisited on SDC.
+ */
+void dispc_enable_preload(enum omap_plane plane, bool enable)
+{
+ u32 val;
+ int x, y;
+
+ /* enable preload */
+ val = dispc_read_reg(dispc_reg_att[plane]);
+ val = FLD_MOD(val, enable, 19, 19);
+ dispc_write_reg(dispc_reg_att[plane], val);
+
+ /* DMA preload values */
+ dispc_write_reg(DISPC_VID_PRELOAD(0), 0x100);
+
+ /* clk divisor for DISPC_CORE_CLK */
+ x = 1; /* 1 for 1080P 2 for others */
+ y = 1;
+ val = FLD_VAL(x, 23, 16) | FLD_VAL(y, 0, 0);
+ dispc_write_reg(DISPC_DIVISOR, val);
+}
+void dispc_set_idle_mode(void)
+{
+ u32 l;
+
+ l = dispc_read_reg(DISPC_SYSCONFIG);
+ l = FLD_MOD(l, 1, 13, 12); /* MIDLEMODE: smart standby */
+ l = FLD_MOD(l, 1, 4, 3); /* SIDLEMODE: smart idle */
+ l = FLD_MOD(l, 0, 2, 2); /* ENWAKEUP: wakeup disabled */
+ l = FLD_MOD(l, 0, 0, 0); /* AUTOIDLE: free-running clock */
+ dispc_write_reg(DISPC_SYSCONFIG, l);
+
+}
+void dispc_enable_gamma_table(bool enable)
+{
+ REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
+}
+
+
+#endif
+
static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
{
u32 val;
@@ -927,13 +1295,18 @@ void dispc_enable_replication(enum omap_plane plane, bool enable)
enable_clocks(0);
}
-void dispc_set_lcd_size(u16 width, u16 height)
+void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
{
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
enable_clocks(1);
- dispc_write_reg(DISPC_SIZE_LCD, val);
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel)
+ dispc_write_reg(DISPC_SIZE_LCD2, val);
+ else
+#endif
+ dispc_write_reg(DISPC_SIZE_LCD, val);
enable_clocks(0);
}
@@ -947,11 +1320,18 @@ void dispc_set_digit_size(u16 width, u16 height)
enable_clocks(0);
}
+
static void dispc_read_plane_fifo_sizes(void)
{
const struct dispc_reg fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
DISPC_VID_FIFO_SIZE_STATUS(0),
- DISPC_VID_FIFO_SIZE_STATUS(1) };
+ DISPC_VID_FIFO_SIZE_STATUS(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_BUF_SIZE_STATUS(0)
+ /* VID 3 pipeline*/
+#endif
+
+ };
u32 size;
int plane;
@@ -962,6 +1342,8 @@ static void dispc_read_plane_fifo_sizes(void)
size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 8, 0);
else if (cpu_is_omap34xx())
size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 10, 0);
+ else if (cpu_is_omap44xx())
+ size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 15, 0);
else
BUG();
@@ -980,21 +1362,35 @@ void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
{
const struct dispc_reg ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
DISPC_VID_FIFO_THRESHOLD(0),
- DISPC_VID_FIFO_THRESHOLD(1) };
+ DISPC_VID_FIFO_THRESHOLD(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_BUF_THRESHOLD(0)
+ /* VID 3 pipeline*/
+#endif
+
+ };
enable_clocks(1);
DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
plane,
+#ifndef CONFIG_ARCH_OMAP4
REG_GET(ftrs_reg[plane], 11, 0),
REG_GET(ftrs_reg[plane], 27, 16),
+#else
+ REG_GET(ftrs_reg[plane], 15, 0),
+ REG_GET(ftrs_reg[plane], 31, 16),
+#endif
low, high);
if (cpu_is_omap24xx())
dispc_write_reg(ftrs_reg[plane],
- FLD_VAL(high, 24, 16) | FLD_VAL(low, 8, 0));
- else
- dispc_write_reg(ftrs_reg[plane],
- FLD_VAL(high, 27, 16) | FLD_VAL(low, 11, 0));
+ FLD_VAL(high, 24, 16) | FLD_VAL(low, 8, 0));
+ else if (cpu_is_omap34xx())
+ dispc_write_reg(ftrs_reg[plane],
+ FLD_VAL(high, 27, 16) | FLD_VAL(low, 11, 0));
+ else /* cpu is omap44xx */
+ dispc_write_reg(ftrs_reg[plane],
+ FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
enable_clocks(0);
}
@@ -1013,7 +1409,13 @@ static void _dispc_set_fir(enum omap_plane plane, int hinc, int vinc)
{
u32 val;
const struct dispc_reg fir_reg[] = { DISPC_VID_FIR(0),
- DISPC_VID_FIR(1) };
+ DISPC_VID_FIR(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_FIR(0)
+ /* VID 3 pipeline*/
+#endif
+
+ };
BUG_ON(plane == OMAP_DSS_GFX);
@@ -1028,11 +1430,19 @@ static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
{
u32 val;
const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU0(0),
- DISPC_VID_ACCU0(1) };
+ DISPC_VID_ACCU0(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_ACCU0(0)
+ /* VID 3 pipeline*/
+#endif
- BUG_ON(plane == OMAP_DSS_GFX);
+ };
- val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+ BUG_ON(plane == OMAP_DSS_GFX);
+ if (cpu_is_omap44xx())
+ val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
+ else
+ val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
dispc_write_reg(ac0_reg[plane-1], val);
}
@@ -1040,58 +1450,267 @@ static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
{
u32 val;
const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU1(0),
- DISPC_VID_ACCU1(1) };
+ DISPC_VID_ACCU1(1)
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_VID_V3_WB_ACCU1(0) /* VID 3 pipeline*/
+#endif
+
+ };
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+ if (cpu_is_omap44xx())
+ val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
+ else
+ val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+ dispc_write_reg(ac1_reg[plane-1], val);
+}
+
+#ifdef CONFIG_ARCH_OMAP4
+static void _dispc_set_fir2(enum omap_plane plane, int hinc, int vinc)
+{
+ u32 val;
+ const struct dispc_reg fir_reg[] = { DISPC_VID_FIR2(0),
+ DISPC_VID_FIR2(1),
+ DISPC_VID_V3_WB_FIR2(0)
+ /* VID 3 pipeline*/
+ };
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
+
+ dispc_write_reg(fir_reg[plane-1], val);
+}
+
+static void _dispc_set_vid_accu2_0(enum omap_plane plane, int haccu, int vaccu)
+{
+ u32 val;
+ const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU2_0(0),
+ DISPC_VID_ACCU2_0(1),
+ DISPC_VID_V3_WB_ACCU2_0(0)
+ };
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
+ dispc_write_reg(ac0_reg[plane-1], val);
+}
+
+static void _dispc_set_vid_accu2_1(enum omap_plane plane, int haccu, int vaccu)
+{
+ u32 val;
+ const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU2_1(0),
+ DISPC_VID_ACCU2_1(1),
+ DISPC_VID_V3_WB_ACCU2_1(0)
+ };
BUG_ON(plane == OMAP_DSS_GFX);
- val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+ val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
dispc_write_reg(ac1_reg[plane-1], val);
}
+#endif
+
+static const s8 fir5_zero[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+static const s8 fir3_m8[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 2, 5, 7, 64, 32, 12, 3,
+ 128, 123, 111, 89, 64, 89, 111, 123,
+ 0, 3, 12, 32, 0, 7, 5, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+static const s8 fir5_m8[] = {
+ 17, 18, 15, 9, -18, -6, 5, 13,
+ -20, -27, -30, -27, 81, 47, 17, -4,
+ 134, 127, 121, 105, 81, 105, 121, 127,
+ -20, -4, 17, 47, -18, -27, -30, -27,
+ 17, 14, 5, -6, 2, 9, 15, 19,
+};
+static const s8 fir5_m8b[] = {
+ 0, 0, -1, -2, -9, -5, -2, -1,
+ 0, -8, -11, -11, 73, 51, 30, 13,
+ 128, 124, 112, 95, 73, 95, 112, 124,
+ 0, 13, 30, 51, -9, -11, -11, -8,
+ 0, -1, -2, -5, 0, -2, -1, 0,
+};
+static const s8 fir5_m9[] = {
+ 8, 14, 17, 17, -26, -18, -9, 1,
+ -8, -21, -27, -30, 83, 56, 30, 8,
+ 128, 126, 117, 103, 83, 103, 117, 126,
+ -8, 8, 30, 56, -26, -30, -27, -21,
+ 8, 1, -9, -18, 14, 17, 17, 14,
+};
+static const s8 fir5_m10[] = {
+ -2, 5, 11, 15, -28, -24, -18, -10,
+ 2, -12, -22, -27, 83, 62, 41, 20,
+ 128, 125, 116, 102, 83, 102, 116, 125,
+ 2, 20, 41, 62, -28, -27, -22, -12,
+ -2, -10, -18, -24, 18, 15, 11, 5,
+};
+static const s8 fir5_m11[] = {
+ -12, -4, 3, 9, -26, -27, -24, -19,
+ 12, -3, -15, -22, 83, 67, 49, 30,
+ 128, 124, 115, 101, 83, 101, 115, 124,
+ 12, 30, 49, 67, -26, -22, -15, -3,
+ -12, -19, -24, -27, 14, 9, 3, -4,
+};
+static const s8 fir5_m12[] = {
+ -19, -12, -6, 1, -21, -25, -26, -24,
+ 21, 6, -7, -16, 82, 70, 55, 38,
+ 124, 120, 112, 98, 82, 98, 112, 120,
+ 21, 38, 55, 70, -21, -16, -7, 6,
+ -19, -24, -26, -25, 6, 1, -6, -12,
+};
+static const s8 fir5_m13[] = {
+ -22, -18, -12, -6, -17, -22, -25, -25,
+ 27, 13, 0, -10, 81, 71, 58, 43,
+ 118, 115, 107, 95, 81, 95, 107, 115,
+ 27, 43, 58, 71, -17, -10, 0, 13,
+ -22, -25, -25, -22, 0, -6, -12, -18,
+};
+static const s8 fir5_m14[] = {
+ -23, -20, -16, -11, -11, -18, -22, -24,
+ 32, 18, 6, -4, 78, 70, 59, 46,
+ 110, 108, 101, 91, 78, 91, 101, 108,
+ 32, 46, 59, 70, -11, -4, 6, 18,
+ -23, -24, -22, -18, -6, -11, -16, -20,
+};
+static const s8 fir3_m16[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 36, 31, 27, 23, 55, 50, 45, 40,
+ 56, 57, 56, 55, 55, 55, 56, 57,
+ 36, 40, 45, 50, 18, 23, 27, 31,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+static const s8 fir5_m16[] = {
+ -20, -21, -19, -17, -2, -9, -14, -18,
+ 37, 26, 15, 6, 73, 66, 58, 48,
+ 94, 93, 88, 82, 73, 82, 88, 93,
+ 37, 48, 58, 66, -2, 6, 15, 26,
+ -20, -18, -14, -9, -14, -17, -19, -21,
+};
+static const s8 fir5_m19[] = {
+ -12, -14, -16, -16, 8, 1, -4, -9,
+ 38, 31, 22, 15, 64, 59, 53, 47,
+ 76, 72, 73, 69, 64, 69, 73, 72,
+ 38, 47, 53, 59, 8, 15, 22, 31,
+ -12, -8, -4, 1, -16, -16, -16, -13,
+};
+static const s8 fir5_m22[] = {
+ -6, -8, -11, -13, 13, 8, 3, -2,
+ 37, 32, 25, 19, 58, 53, 48, 44,
+ 66, 61, 63, 61, 58, 61, 63, 61,
+ 37, 44, 48, 53, 13, 19, 25, 32,
+ -6, -1, 3, 8, -14, -13, -11, -7,
+};
+static const s8 fir5_m26[] = {
+ 1, -2, -5, -8, 18, 13, 8, 4,
+ 36, 31, 27, 22, 51, 48, 44, 40,
+ 54, 55, 54, 53, 51, 53, 54, 55,
+ 36, 40, 44, 48, 18, 22, 27, 31,
+ 1, 4, 8, 13, -10, -8, -5, -2,
+};
+static const s8 fir5_m32[] = {
+ 7, 4, 1, -1, 21, 17, 14, 10,
+ 34, 31, 27, 24, 45, 42, 39, 37,
+ 46, 46, 46, 46, 45, 46, 46, 46,
+ 34, 37, 39, 42, 21, 24, 28, 31,
+ 7, 10, 14, 17, -4, -1, 1, 4,
+};
+
+static const s8 *get_scaling_coef(int orig_size, int out_size,
+ int orig_ilaced, int out_ilaced,
+ int three_tap)
+{
+ /* ranges from 2 to 32 */
+ int two_m = 16 * orig_size / out_size;
+
+ if (orig_size > 4 * out_size || out_size > 8 * orig_size)
+ return fir5_zero;
+
+ /* interlaced output needs at least M = 16 */
+ if (out_ilaced) {
+ if (two_m < 32)
+ two_m = 32;
+ }
+
+ if (three_tap)
+ return two_m < 24 ? fir3_m8 : fir3_m16;
+
+ return orig_size < out_size ? fir5_m8b :
+ two_m < 17 ? fir5_m8 :
+ two_m < 19 ? fir5_m9 :
+ two_m < 21 ? fir5_m10 :
+ two_m < 23 ? fir5_m11 :
+ two_m < 25 ? fir5_m12 :
+ two_m < 27 ? fir5_m13 :
+ two_m < 30 ? fir5_m14 :
+ two_m < 35 ? fir5_m16 :
+ two_m < 41 ? fir5_m19 :
+ two_m < 48 ? fir5_m22 :
+ two_m < 58 ? fir5_m26 :
+ fir5_m32;
+}
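Worked example of the selection above (illustrative values only): a 1280-to-720 downscale gives two_m = 16 * 1280 / 720 = 28, so the 5-tap path lands in the two_m < 30 bucket and returns fir5_m14, while the 3-tap path returns fir3_m16 because 28 >= 24:

    const s8 *five_tap  = get_scaling_coef(1280, 720, 0, 0, 0); /* fir5_m14 */
    const s8 *three_tap = get_scaling_coef(1280, 720, 0, 0, 1); /* fir3_m16 */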
static void _dispc_set_scaling(enum omap_plane plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
- bool ilace, bool five_taps,
- bool fieldmode)
+ bool ilace, bool three_taps,
+ bool fieldmode, int scale_x, int scale_y)
{
int fir_hinc;
int fir_vinc;
- int hscaleup, vscaleup;
int accu0 = 0;
int accu1 = 0;
u32 l;
+ const s8 *hfir, *vfir;
BUG_ON(plane == OMAP_DSS_GFX);
- hscaleup = orig_width <= out_width;
- vscaleup = orig_height <= out_height;
-
- _dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps);
-
- if (!orig_width || orig_width == out_width)
+ if (scale_x) {
+ fir_hinc = 1024 * (orig_width - 1) / (out_width - 1);
+ if (fir_hinc > 4095)
+ fir_hinc = 4095;
+ hfir = get_scaling_coef(orig_width, out_width, 0, 0, 0);
+ } else {
fir_hinc = 0;
- else
- fir_hinc = 1024 * orig_width / out_width;
+ hfir = fir5_zero;
+ }
- if (!orig_height || orig_height == out_height)
+ if (scale_y) {
+ fir_vinc = 1024 * (orig_height - 1) / (out_height - 1);
+ if (fir_vinc > 4095)
+ fir_vinc = 4095;
+ vfir = get_scaling_coef(orig_height, out_height, 0, 0,
+ three_taps);
+ } else {
fir_vinc = 0;
- else
- fir_vinc = 1024 * orig_height / out_height;
+ vfir = fir5_zero;
+ }
+ _dispc_set_scale_coef(plane, hfir, vfir, three_taps);
_dispc_set_fir(plane, fir_hinc, fir_vinc);
l = dispc_read_reg(dispc_reg_att[plane]);
- l &= ~((0x0f << 5) | (0x3 << 21));
-
+ /* setting attrib register for scaling */
+#ifndef CONFIG_ARCH_OMAP4
+ l &= ~((0x0f << 5) | (0x1 << 21));
+ l |= out_width > orig_width ? 0 : (1 << 7);
+ l |= out_height > orig_height ? 0 : (1 << 8);
+#else
+ l &= ~((0x03 << 5) | (0x1 << 21));
+#endif
l |= fir_hinc ? (1 << 5) : 0;
l |= fir_vinc ? (1 << 6) : 0;
- l |= hscaleup ? 0 : (1 << 7);
- l |= vscaleup ? 0 : (1 << 8);
-
- l |= five_taps ? (1 << 21) : 0;
- l |= five_taps ? (1 << 22) : 0;
+ l |= three_taps ? 0 : (1 << 21);
dispc_write_reg(dispc_reg_att[plane], l);
@@ -1112,9 +1731,83 @@ static void _dispc_set_scaling(enum omap_plane plane,
_dispc_set_vid_accu1(plane, 0, accu1);
}
+static void _dispc_set_scaling_uv(enum omap_plane plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool ilace, bool three_taps,
+ bool fieldmode, int scale_x, int scale_y)
+{
+ int i;
+ int fir_hinc, fir_vinc;
+ int accu0, accu1, accuh;
+ const s8 *hfir, *vfir;
+
+ if (scale_x) {
+ fir_hinc = 1024 * (orig_width - 1) / (out_width - 1);
+ if (fir_hinc > 4095)
+ fir_hinc = 4095;
+ hfir = get_scaling_coef(orig_width, out_width, 0, 0, 0);
+ } else {
+ fir_hinc = 0;
+ hfir = fir5_zero;
+ }
+
+ if (scale_y) {
+ fir_vinc = 1024 * (orig_height - 0) / (out_height - 0);
+ if (fir_vinc > 4095)
+ fir_vinc = 4095;
+ vfir = get_scaling_coef(orig_height, out_height, 0,
+ ilace, three_taps);
+ } else {
+ fir_vinc = 0;
+ vfir = fir5_zero;
+ }
+
+ for (i = 0; i < 8; i++, hfir++, vfir++) {
+ u32 h, hv, v;
+ h = ((hfir[0] & 0xFF) | ((hfir[8] << 8) & 0xFF00) |
+ ((hfir[16] << 16) & 0xFF0000) |
+ ((hfir[24] << 24) & 0xFF000000));
+ hv = ((hfir[32] & 0xFF) | ((vfir[8] << 8) & 0xFF00) |
+ ((vfir[16] << 16) & 0xFF0000) |
+ ((vfir[24] << 24) & 0xFF000000));
+ v = ((vfir[0] & 0xFF) | ((vfir[32] << 8) & 0xFF00));
+
+ _dispc_write_firh2_reg(plane, i, h);
+ _dispc_write_firhv2_reg(plane, i, hv);
+ _dispc_write_firv2_reg(plane, i, v);
+ }
+
+ /* set chroma resampling */
+ REG_FLD_MOD(DISPC_VID_ATTRIBUTES2(plane - 1),
+ (fir_hinc || fir_vinc) ? 1 : 0, 8, 8);
+
+ /* set H scaling */
+ REG_FLD_MOD(dispc_reg_att[plane], fir_hinc ? 1 : 0, 6, 6);
+
+ /* set V scaling */
+ REG_FLD_MOD(dispc_reg_att[plane], fir_vinc ? 1 : 0, 5, 5);
+
+ _dispc_set_fir2(plane, fir_hinc, fir_vinc);
+
+ if (ilace) {
+ accu0 = (-3 * fir_vinc / 4) % 1024;
+ accu1 = (-fir_vinc / 4) % 1024;
+ } else {
+ accu0 = accu1 = (-fir_vinc / 2) % 1024;
+ }
+ accuh = (-fir_hinc / 2) % 1024;
+
+ _dispc_set_vid_accu2_0(plane, 0x80, 0);
+ _dispc_set_vid_accu2_1(plane, 0x80, 0);
+ /* _dispc_set_vid_accu2_0(plane, accuh, accu0);
+ _dispc_set_vid_accu2_1(plane, accuh, accu1); */
+}
+
static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
bool mirroring, enum omap_color_mode color_mode)
{
+#ifndef CONFIG_ARCH_OMAP4
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY) {
int vidrot = 0;
@@ -1161,6 +1854,20 @@ static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
REG_FLD_MOD(dispc_reg_att[plane], 0, 13, 12);
REG_FLD_MOD(dispc_reg_att[plane], 0, 18, 18);
}
+#else
+ if (plane != OMAP_DSS_GFX) {
+ if (color_mode == OMAP_DSS_COLOR_NV12) {
+ /* DOUBLESTRIDE : 0 for 90-, 270-; 1 for 0- and 180- */
+ if (rotation == 1 || rotation == 3)
+ REG_FLD_MOD(dispc_reg_att[plane], 0x0, 22, 22);
+ else
+ REG_FLD_MOD(dispc_reg_att[plane], 0x1, 22, 22);
+ }
+ }
+
+ /* Set the rotation value for pipeline */
+/* REG_FLD_MOD(dispc_reg_att[plane], rotation, 13, 12); */
+#endif
}
static int color_mode_to_bpp(enum omap_color_mode color_mode)
@@ -1204,6 +1911,70 @@ static s32 pixinc(int pixels, u8 ps)
BUG();
}
+static void calc_tiler_row_rotation(u8 rotation,
+ u16 width, u16 height,
+ enum omap_color_mode color_mode,
+ s32 *row_inc)
+{
+ u8 ps = 1;
+ DSSDBG("calc_tiler_rot(%d): %dx%d\n", rotation, width, height);
+
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_RGB16:
+ case OMAP_DSS_COLOR_ARGB16:
+
+ case OMAP_DSS_COLOR_YUV2:
+ case OMAP_DSS_COLOR_UYVY:
+ ps = 2;
+ break;
+
+ case OMAP_DSS_COLOR_RGB24P:
+ case OMAP_DSS_COLOR_RGB24U:
+ case OMAP_DSS_COLOR_ARGB32:
+ case OMAP_DSS_COLOR_RGBA32:
+ case OMAP_DSS_COLOR_RGBX32:
+ ps = 4;
+ break;
+
+ case OMAP_DSS_COLOR_NV12:
+ ps = 1;
+ break;
+
+ default:
+ BUG();
+ return;
+ }
+
+ switch (rotation) {
+ case 0:
+ case 2:
+ if (1 == ps)
+ *row_inc = 16384 + 1 - (width);
+ else
+ *row_inc = 32768 + 1 - (width * ps);
+ break;
+
+ case 1:
+ case 3:
+ if (4 == ps)
+ *row_inc = 16384 + 1 - (width * ps);
+ else
+ *row_inc = 8192 + 1 - (width * ps);
+ break;
+
+ default:
+ BUG();
+ return;
+ }
+
+ DSSDBG(
+ " colormode: %d, rotation: %d, ps: %d, width: %d,"
+ " height: %d, row_inc:%d\n",
+ color_mode, rotation, ps, width, height, *row_inc);
+
+ return;
+}
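The constants above encode the TILER container row strides assumed by this patch: in the 0/180-degree views a row is 16 KiB for 8-bit data and 32 KiB for 16/32-bit data, while in the 90/270-degree views it is 16 KiB for 32-bit data and 8 KiB otherwise, with ROW_INC = stride + 1 - line length in bytes. A worked example (illustrative values only):

    s32 row_inc;
    /* 864-pixel-wide RGB16 surface (ps = 2), TILER 0-degree view */
    calc_tiler_row_rotation(0, 864, 480, OMAP_DSS_COLOR_RGB16, &row_inc);
    /* row_inc = 32768 + 1 - 864 * 2 = 31041 */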
+
static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
u16 screen_width,
u16 width, u16 height,
@@ -1428,12 +2199,13 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
}
}
-static unsigned long calc_fclk_five_taps(u16 width, u16 height,
- u16 out_width, u16 out_height, enum omap_color_mode color_mode)
+static unsigned long calc_fclk_five_taps(enum omap_channel channel,
+ u16 width, u16 height, u16 out_width, u16 out_height,
+ enum omap_color_mode color_mode)
{
u32 fclk = 0;
/* FIXME venc pclk? */
- u64 tmp, pclk = dispc_pclk_rate();
+ u64 tmp, pclk = dispc_pclk_rate(channel);
if (height > out_height) {
/* FIXME get real display PPL */
@@ -1462,8 +2234,8 @@ static unsigned long calc_fclk_five_taps(u16 width, u16 height,
return fclk;
}
-static unsigned long calc_fclk(u16 width, u16 height,
- u16 out_width, u16 out_height)
+static unsigned long calc_fclk(enum omap_channel channel, u16 width,
+ u16 height, u16 out_width, u16 out_height)
{
unsigned int hf, vf;
@@ -1487,7 +2259,7 @@ static unsigned long calc_fclk(u16 width, u16 height,
vf = 1;
/* FIXME venc pclk? */
- return dispc_pclk_rate() * vf * hf;
+ return dispc_pclk_rate(channel) * vf * hf;
}
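Only the tail of calc_fclk() is visible in this hunk; the return statement shows that the required functional clock is the channel pixel clock scaled by the horizontal and vertical decimation factors. A back-of-the-envelope check, with hf and vf assumed to be 2 (their selection logic is outside this hunk):

    unsigned long pclk = 74250000UL;	/* example 720p pixel clock */
    unsigned int hf = 2, vf = 2;	/* assumed decimation factors */
    unsigned long fclk = pclk * vf * hf;	/* 297 MHz required */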
void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
@@ -1497,6 +2269,8 @@ void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
enable_clocks(0);
}
+
+
static int _dispc_setup_plane(enum omap_plane plane,
u32 paddr, u16 screen_width,
u16 pos_x, u16 pos_y,
@@ -1506,10 +2280,19 @@ static int _dispc_setup_plane(enum omap_plane plane,
bool ilace,
enum omap_dss_rotation_type rotation_type,
u8 rotation, int mirror,
- u8 global_alpha)
+ u8 global_alpha, enum omap_channel channel
+#ifdef CONFIG_ARCH_OMAP4
+ , u32 puv_addr
+#endif
+ )
{
- const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
- bool five_taps = 0;
+
+#ifdef CONFIG_ARCH_OMAP4
+ int maxdownscale = 4;
+#else
+ int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
+#endif
+ bool three_taps = 0;
bool fieldmode = 0;
int cconv = 0;
unsigned offset0, offset1;
@@ -1518,6 +2301,12 @@ static int _dispc_setup_plane(enum omap_plane plane,
u16 frame_height = height;
unsigned int field_offset = 0;
+ u8 orientation = 0;
+ struct tiler_view_orient orient;
+ unsigned long r, mir_x = 0, mir_y = 0;
+ unsigned long tiler_width, tiler_height;
+ void __iomem *reg = NULL;
+
if (paddr == 0)
return -EINVAL;
@@ -1583,12 +2372,23 @@ static int _dispc_setup_plane(enum omap_plane plane,
case OMAP_DSS_COLOR_ARGB16:
case OMAP_DSS_COLOR_ARGB32:
case OMAP_DSS_COLOR_RGBA32:
+#ifdef CONFIG_ARCH_OMAP4
+ case OMAP_DSS_COLOR_RGBA12:
+ case OMAP_DSS_COLOR_XRGB12:
+ case OMAP_DSS_COLOR_ARGB16_1555:
+ case OMAP_DSS_COLOR_RGBX24_32_ALGN:
+ case OMAP_DSS_COLOR_XRGB15:
+#else
if (cpu_is_omap24xx())
return -EINVAL;
if (plane == OMAP_DSS_VIDEO1)
return -EINVAL;
+#endif
break;
+#ifdef CONFIG_ARCH_OMAP4
+ case OMAP_DSS_COLOR_NV12:
+#endif
case OMAP_DSS_COLOR_YUV2:
case OMAP_DSS_COLOR_UYVY:
cconv = 1;
@@ -1598,27 +2398,33 @@ static int _dispc_setup_plane(enum omap_plane plane,
return -EINVAL;
}
- /* Must use 5-tap filter? */
- five_taps = height > out_height * 2;
+ /* Must use 3-tap filter */
+ three_taps = width > 1280;
+
+ /* Should use 3-tap filter for upscaling, but HDMI gets
+ out of sync if using 3-tap */
+ /* if (out_height > height)
+ three_taps = 1; */
- if (!five_taps) {
- fclk = calc_fclk(width, height,
+ if (three_taps) {
+ fclk = calc_fclk(channel, width, height,
out_width, out_height);
- /* Try 5-tap filter if 3-tap fclk is too high */
+ /* Try 5-tap filter if 3-tap fclk is too high*/
if (cpu_is_omap34xx() && height > out_height &&
- fclk > dispc_fclk_rate())
- five_taps = true;
+ fclk > dispc_fclk_rate()) {
+ printk(KERN_ERR
+ "5-tap filtering needed but not available\n");
+ }
+ } else {
+ fclk = calc_fclk_five_taps(channel, width, height,
+ out_width, out_height, color_mode);
}
- if (width > (2048 >> five_taps)) {
- DSSERR("failed to set up scaling, fclk too low\n");
+#ifndef CONFIG_ARCH_OMAP4
+ if (width > (1024 << three_taps))
return -EINVAL;
- }
-
- if (five_taps)
- fclk = calc_fclk_five_taps(width, height,
- out_width, out_height, color_mode);
+#endif
DSSDBG("required fclk rate = %lu Hz\n", fclk);
DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
@@ -1649,7 +2455,58 @@ static int _dispc_setup_plane(enum omap_plane plane,
/* Fields are independent but interleaved in memory. */
if (fieldmode)
field_offset = 1;
+#ifdef CONFIG_ARCH_OMAP4 /* TODO: OMAP4: needs review */
+ pix_inc = 0x1;
+ offset0 = 0x0;
+ offset1 = 0x0;
+ /* check if tiler address; else set row_inc = 1*/
+ if ((paddr >= 0x60000000) && (paddr <= 0x7fffffff)) {
+ calc_tiler_row_rotation(rotation, width, frame_height,
+ color_mode, &row_inc);
+ orientation = calc_tiler_orientation(rotation, (u8)mirror);
+ /* get rotated top-left coordinate
+ (if rotation is applied before mirroring) */
+ memset(&orient, 0, sizeof(orient));
+ tiler_rotate_view(&orient, rotation * 90);
+
+ if (mirror) {
+ /* Horizontal mirroring */
+ if (rotation == 1 || rotation == 3)
+ mir_x = 1;
+ else
+ mir_y = 1;
+ } else {
+ mir_x = 0;
+ mir_y = 0;
+ }
+ orient.x_invert ^= mir_x;
+ orient.y_invert ^= mir_y;
+
+ if (orient.rotate_90 & 1) {
+ tiler_height = width;
+ tiler_width = height;
+ } else {
+ tiler_height = height;
+ tiler_width = width;
+ }
+
+ paddr = tiler_reorient_topleft(tiler_get_natural_addr(paddr),
+ orient, tiler_width, tiler_height);
+
+ if (puv_addr)
+ puv_addr = tiler_reorient_topleft(
+ tiler_get_natural_addr(puv_addr),
+ orient, tiler_width/2, tiler_height/2);
+ DSSDBG(
+ "rotated addresses: 0x%0x, 0x%0x\n",
+ paddr, puv_addr);
+ /* set BURSTTYPE if rotation is non-zero */
+ REG_FLD_MOD(dispc_reg_att[plane], 0x1, 29, 29);
+ } else
+ row_inc = 0x1;
+
+#else
if (rotation_type == OMAP_DSS_ROT_DMA)
calc_dma_rotation_offset(rotation, mirror,
screen_width, width, frame_height, color_mode,
@@ -1660,7 +2517,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
screen_width, width, frame_height, color_mode,
fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc);
-
+#endif
DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
offset0, offset1, row_inc, pix_inc);
@@ -1668,7 +2525,13 @@ static int _dispc_setup_plane(enum omap_plane plane,
_dispc_set_plane_ba0(plane, paddr + offset0);
_dispc_set_plane_ba1(plane, paddr + offset1);
-
+#ifdef CONFIG_ARCH_OMAP4
+ /* TODO: check the offset calculations? */
+ if (OMAP_DSS_COLOR_NV12 == color_mode) {
+ _dispc_set_plane_ba_uv0(plane, puv_addr + offset0);
+ _dispc_set_plane_ba_uv1(plane, puv_addr + offset1);
+ }
+#endif
_dispc_set_row_inc(plane, row_inc);
_dispc_set_pix_inc(plane, pix_inc);
@@ -1680,24 +2543,73 @@ static int _dispc_setup_plane(enum omap_plane plane,
_dispc_set_pic_size(plane, width, height);
if (plane != OMAP_DSS_GFX) {
+ int scale_x = width != out_width;
+ int scale_y = height != out_height;
+#ifdef CONFIG_ARCH_OMAP4
+ u16 out_ch_height = out_height;
+ u16 out_ch_width = out_width;
+ u16 ch_height = height;
+ u16 ch_width = width;
+ int scale_uv = 0;
+
+ /* account for chroma decimation */
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_NV12:
+ ch_height >>= 1; /* chroma downsampled by 2 vertically */
+ /* fall through */
+ case OMAP_DSS_COLOR_YUV2:
+ case OMAP_DSS_COLOR_UYVY:
+ ch_width >>= 1; /* chroma downsampled by 2 horizontally */
+ /* must use FIR for YUV422 if rotated */
+ if (color_mode != OMAP_DSS_COLOR_NV12 && rotation % 4)
+ scale_x = scale_y = 1;
+ scale_uv = 1;
+ break;
+ default:
+ /* no UV scaling for RGB formats for now */
+ break;
+ }
+
+ if (out_ch_width != ch_width)
+ scale_x = true;
+ if (out_ch_height != ch_height)
+ scale_y = true;
+ /* set up UV scaling */
+ _dispc_set_scaling_uv(plane, ch_width, ch_height,
+ out_ch_width, out_ch_height, ilace,
+ three_taps, fieldmode, scale_uv && scale_x,
+ scale_uv && scale_y);
+ if (!scale_uv || (!scale_x && !scale_y))
+ /* :TRICKY: keep chroma resampling disabled for RGB formats */
+ REG_FLD_MOD(DISPC_VID_ATTRIBUTES2(plane - 1), 0, 8, 8);
+#endif
_dispc_set_scaling(plane, width, height,
out_width, out_height,
- ilace, five_taps, fieldmode);
+ ilace, three_taps, fieldmode,
+ scale_x, scale_y);
_dispc_set_vid_size(plane, out_width, out_height);
_dispc_set_vid_color_conv(plane, cconv);
}
_dispc_set_rotation_attrs(plane, rotation, mirror, color_mode);
+#ifndef CONFIG_ARCH_OMAP4
if (plane != OMAP_DSS_VIDEO1)
+#endif
_dispc_setup_global_alpha(plane, global_alpha);
+ pix_inc = dispc_read_reg(dispc_reg_att[plane]);
+ DSSDBG("vid[%d] attributes = %x\n", plane, pix_inc);
+
return 0;
}
static void _dispc_enable_plane(enum omap_plane plane, bool enable)
{
REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 0, 0);
+ if (!enable) { /* clear out resizer related bits */
+ REG_FLD_MOD(dispc_reg_att[plane], 0x00, 6, 5);
+ REG_FLD_MOD(dispc_reg_att[plane], 0x00, 21, 21);
+ }
}
static void dispc_disable_isr(void *data, u32 mask)
@@ -1706,36 +2618,51 @@ static void dispc_disable_isr(void *data, u32 mask)
complete(compl);
}
-static void _enable_lcd_out(bool enable)
+static void _enable_lcd_out(enum omap_channel channel, bool enable)
{
- REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel)
+ REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0);
+ else
+#endif
+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
}
-void dispc_enable_lcd_out(bool enable)
+void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
{
struct completion frame_done_completion;
bool is_on;
int r;
+ int irq;
enable_clocks(1);
/* When we disable LCD output, we need to wait until frame is done.
* Otherwise the DSS is still working, and turning off the clocks
* prevents DSS from going to OFF mode */
- is_on = REG_GET(DISPC_CONTROL, 0, 0);
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel) {
+ is_on = REG_GET(DISPC_CONTROL2, 0, 0);
+ irq = DISPC_IRQ_FRAMEDONE2;
+ } else
+#endif
+ {
+ is_on = REG_GET(DISPC_CONTROL, 0, 0);
+ irq = DISPC_IRQ_FRAMEDONE;
+ }
if (!enable && is_on) {
init_completion(&frame_done_completion);
r = omap_dispc_register_isr(dispc_disable_isr,
&frame_done_completion,
- DISPC_IRQ_FRAMEDONE);
+ irq);
if (r)
DSSERR("failed to register FRAMEDONE isr\n");
}
- _enable_lcd_out(enable);
+ _enable_lcd_out(channel, enable);
if (!enable && is_on) {
if (!wait_for_completion_timeout(&frame_done_completion,
@@ -1744,7 +2671,7 @@ void dispc_enable_lcd_out(bool enable)
r = omap_dispc_unregister_isr(dispc_disable_isr,
&frame_done_completion,
- DISPC_IRQ_FRAMEDONE);
+ irq);
if (r)
DSSERR("failed to unregister FRAMEDONE isr\n");
@@ -1851,7 +2778,8 @@ void dispc_enable_fifohandcheck(bool enable)
}
-void dispc_set_lcd_display_type(enum omap_lcd_display_type type)
+void dispc_set_lcd_display_type(enum omap_channel channel,
+ enum omap_lcd_display_type type)
{
int mode;
@@ -1870,7 +2798,12 @@ void dispc_set_lcd_display_type(enum omap_lcd_display_type type)
}
enable_clocks(1);
- REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel)
+ REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
+ else
+#endif
+ REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
enable_clocks(0);
}
@@ -1885,7 +2818,11 @@ void dispc_set_loadmode(enum omap_dss_load_mode mode)
void dispc_set_default_color(enum omap_channel channel, u32 color)
{
const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
- DISPC_DEFAULT_COLOR1 };
+ DISPC_DEFAULT_COLOR1
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_DEFAULT_COLOR2
+#endif
+ };
enable_clocks(1);
dispc_write_reg(def_reg[channel], color);
@@ -1895,11 +2832,18 @@ void dispc_set_default_color(enum omap_channel channel, u32 color)
u32 dispc_get_default_color(enum omap_channel channel)
{
const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
- DISPC_DEFAULT_COLOR1 };
+ DISPC_DEFAULT_COLOR1
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_DEFAULT_COLOR2
+#endif
+};
+
u32 l;
+#ifndef CONFIG_ARCH_OMAP4
BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT &&
channel != OMAP_DSS_CHANNEL_LCD);
+#endif
enable_clocks(1);
l = dispc_read_reg(def_reg[channel]);
@@ -1913,9 +2857,19 @@ void dispc_set_trans_key(enum omap_channel ch,
u32 trans_key)
{
const struct dispc_reg tr_reg[] = {
- DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
+ DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_TRANS_COLOR2
+#endif
+
+ };
enable_clocks(1);
+#ifdef CONFIG_ARCH_OMAP4
+ if (ch == OMAP_DSS_CHANNEL_LCD2)
+ REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);
+ else
+#endif
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
else /* OMAP_DSS_CHANNEL_DIGIT */
@@ -1930,10 +2884,20 @@ void dispc_get_trans_key(enum omap_channel ch,
u32 *trans_key)
{
const struct dispc_reg tr_reg[] = {
- DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
+ DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1
+#ifdef CONFIG_ARCH_OMAP4
+ , DISPC_TRANS_COLOR2
+#endif
+
+ };
enable_clocks(1);
if (type) {
+#ifdef CONFIG_ARCH_OMAP4
+ if (ch == OMAP_DSS_CHANNEL_LCD2)
+ *type = REG_GET(DISPC_CONFIG2, 11, 11);
+ else
+#endif
if (ch == OMAP_DSS_CHANNEL_LCD)
*type = REG_GET(DISPC_CONFIG, 11, 11);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -1950,6 +2914,11 @@ void dispc_get_trans_key(enum omap_channel ch,
void dispc_enable_trans_key(enum omap_channel ch, bool enable)
{
enable_clocks(1);
+#ifdef CONFIG_ARCH_OMAP4
+ if (ch == OMAP_DSS_CHANNEL_LCD2)
+ REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
+ else
+#endif
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
else /* OMAP_DSS_CHANNEL_DIGIT */
@@ -1992,8 +2961,15 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
bool dispc_trans_key_enabled(enum omap_channel ch)
{
bool enabled;
-
+#ifdef CONFIG_ARCH_OMAP4
+ BUG_ON(ch == OMAP_DSS_CHANNEL_LCD2);
+#endif
enable_clocks(1);
+#ifdef CONFIG_ARCH_OMAP4
+ if (ch == OMAP_DSS_CHANNEL_LCD2)
+ enabled = REG_GET(DISPC_CONFIG2, 10, 10);
+ else
+#endif
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 10, 10);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2006,7 +2982,7 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
}
-void dispc_set_tft_data_lines(u8 data_lines)
+void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
{
int code;
@@ -2029,11 +3005,17 @@ void dispc_set_tft_data_lines(u8 data_lines)
}
enable_clocks(1);
- REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
+#ifdef CONFIG_ARCH_OMAP4
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
+ else
+#endif
+ REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
enable_clocks(0);
}
-void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode)
+void dispc_set_parallel_interface_mode(enum omap_channel channel,
+ enum omap_parallel_interface_mode mode)
{
u32 l;
int stallmode;
@@ -2062,20 +3044,41 @@ void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode)
}
enable_clocks(1);
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel) {
+ l = dispc_read_reg(DISPC_CONTROL2);
+
+ printk(KERN_INFO "OMAP DISPCONTROL2 read (stallmode) %d\n",
+ FLD_GET(l, 11, 11));
+
+ l = FLD_MOD(l, stallmode, 11, 11);
+
+ dispc_write_reg(DISPC_CONTROL2, l);
+
+ } else
+#endif
+ {
+ l = dispc_read_reg(DISPC_CONTROL);
- l = dispc_read_reg(DISPC_CONTROL);
+ printk(KERN_INFO "OMAP DISPCONTROL read (stallmode) %d\n",
+ FLD_GET(l, 11, 11));
+ printk(KERN_INFO "OMAP DISPCONTROL read (gpout0) %d\n",
+ FLD_GET(l, 15, 15));
+ printk(KERN_INFO "OMAP DISPCONTROL read (gpout1) %d\n",
+ FLD_GET(l, 16, 16));
- l = FLD_MOD(l, stallmode, 11, 11);
- l = FLD_MOD(l, gpout0, 15, 15);
- l = FLD_MOD(l, gpout1, 16, 16);
- dispc_write_reg(DISPC_CONTROL, l);
+ l = FLD_MOD(l, stallmode, 11, 11);
+ l = FLD_MOD(l, gpout0, 15, 15);
+ l = FLD_MOD(l, gpout1, 16, 16);
+ dispc_write_reg(DISPC_CONTROL, l);
+ }
enable_clocks(0);
}
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
- int vsw, int vfp, int vbp)
+ int vsw, int vfp, int vbp)
{
if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
if (hsw < 1 || hsw > 64 ||
@@ -2100,38 +3103,49 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
{
+
return _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
timings->hbp, timings->vsw,
timings->vfp, timings->vbp);
}
-static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp,
- int vsw, int vfp, int vbp)
+static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw,
+ int hfp, int hbp, int vsw, int vfp, int vbp)
{
u32 timing_h, timing_v;
if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) |
- FLD_VAL(hbp-1, 27, 20);
+ FLD_VAL(hbp-1, 27, 20);
timing_v = FLD_VAL(vsw-1, 5, 0) | FLD_VAL(vfp, 15, 8) |
- FLD_VAL(vbp, 27, 20);
+ FLD_VAL(vbp, 27, 20);
} else {
+
timing_h = FLD_VAL(hsw-1, 7, 0) | FLD_VAL(hfp-1, 19, 8) |
- FLD_VAL(hbp-1, 31, 20);
+ FLD_VAL(hbp-1, 31, 20);
timing_v = FLD_VAL(vsw-1, 7, 0) | FLD_VAL(vfp, 19, 8) |
- FLD_VAL(vbp, 31, 20);
+ FLD_VAL(vbp, 31, 20);
}
-
enable_clocks(1);
- dispc_write_reg(DISPC_TIMING_H, timing_h);
- dispc_write_reg(DISPC_TIMING_V, timing_v);
+
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel) {
+ dispc_write_reg(DISPC_TIMING_H2, timing_h);
+ dispc_write_reg(DISPC_TIMING_V2, timing_v);
+ } else
+#endif
+ {
+ dispc_write_reg(DISPC_TIMING_H, timing_h);
+ dispc_write_reg(DISPC_TIMING_V, timing_v);
+ }
enable_clocks(0);
}
-/* change name to mode? */
-void dispc_set_lcd_timings(struct omap_video_timings *timings)
+
+void dispc_set_lcd_timings(enum omap_channel channel,
+ struct omap_video_timings *timings)
{
unsigned xtot, ytot;
unsigned long ht, vt;
@@ -2141,10 +3155,11 @@ void dispc_set_lcd_timings(struct omap_video_timings *timings)
timings->vfp, timings->vbp))
BUG();
- _dispc_set_lcd_timings(timings->hsw, timings->hfp, timings->hbp,
- timings->vsw, timings->vfp, timings->vbp);
+ _dispc_set_lcd_timings(channel, timings->hsw, timings->hfp,
+ timings->hbp, timings->vsw, timings->vfp,
+ timings->vbp);
- dispc_set_lcd_size(timings->x_res, timings->y_res);
+ dispc_set_lcd_size(channel, timings->x_res, timings->y_res);
xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
@@ -2152,7 +3167,8 @@ void dispc_set_lcd_timings(struct omap_video_timings *timings)
ht = (timings->pixel_clock * 1000) / xtot;
vt = (timings->pixel_clock * 1000) / xtot / ytot;
- DSSDBG("xres %u yres %u\n", timings->x_res, timings->y_res);
+ DSSDBG("channel %u xres %u yres %u\n", channel, timings->x_res,
+ timings->y_res);
DSSDBG("pck %u\n", timings->pixel_clock);
DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
timings->hsw, timings->hfp, timings->hbp,
@@ -2161,25 +3177,44 @@ void dispc_set_lcd_timings(struct omap_video_timings *timings)
DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
}
-static void dispc_set_lcd_divisor(u16 lck_div, u16 pck_div)
+void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
+ u16 pck_div)
{
BUG_ON(lck_div < 1);
BUG_ON(pck_div < 2);
enable_clocks(1);
- dispc_write_reg(DISPC_DIVISOR,
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel)
+ dispc_write_reg(DISPC_DIVISOR2,
+ FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
+ else
+ dispc_write_reg(DISPC_DIVISOR1,
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
+#else
+ dispc_write_reg(DISPC_DIVISOR,
+ FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
+#endif
enable_clocks(0);
}
-static void dispc_get_lcd_divisor(int *lck_div, int *pck_div)
+static void dispc_get_lcd_divisor(enum omap_channel channel,
+ int *lck_div, int *pck_div)
{
u32 l;
- l = dispc_read_reg(DISPC_DIVISOR);
+
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel)
+ l = dispc_read_reg(DISPC_DIVISOR2);
+ else
+ l = dispc_read_reg(DISPC_DIVISOR1);
+#else
+ l = dispc_read_reg(DISPC_DIVISOR);
+#endif
*lck_div = FLD_GET(l, 23, 16);
*pck_div = FLD_GET(l, 7, 0);
}
-
+/* TODO: Check with Senthil on handling of clocks */
unsigned long dispc_fclk_rate(void)
{
unsigned long r = 0;
@@ -2188,21 +3223,26 @@ unsigned long dispc_fclk_rate(void)
r = dss_clk_get_rate(DSS_CLK_FCK1);
else
#ifdef CONFIG_OMAP2_DSS_DSI
- r = dsi_get_dsi1_pll_rate();
+ r = dsi_get_dsi1_pll_rate(0);
#else
BUG();
#endif
return r;
}
-unsigned long dispc_lclk_rate(void)
+unsigned long dispc_lclk_rate(enum omap_channel channel)
{
int lcd;
unsigned long r;
u32 l;
-
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel)
+ l = dispc_read_reg(DISPC_DIVISOR2);
+ else
+ l = dispc_read_reg(DISPC_DIVISOR1);
+#else
l = dispc_read_reg(DISPC_DIVISOR);
-
+#endif
lcd = FLD_GET(l, 23, 16);
r = dispc_fclk_rate();
@@ -2210,14 +3250,19 @@ unsigned long dispc_lclk_rate(void)
return r / lcd;
}
-unsigned long dispc_pclk_rate(void)
+unsigned long dispc_pclk_rate(enum omap_channel channel)
{
int lcd, pcd;
unsigned long r;
u32 l;
-
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel)
+ l = dispc_read_reg(DISPC_DIVISOR2);
+ else
+ l = dispc_read_reg(DISPC_DIVISOR1);
+#else
l = dispc_read_reg(DISPC_DIVISOR);
-
+#endif
lcd = FLD_GET(l, 23, 16);
pcd = FLD_GET(l, 7, 0);
@@ -2232,7 +3277,7 @@ void dispc_dump_clocks(struct seq_file *s)
enable_clocks(1);
- dispc_get_lcd_divisor(&lcd, &pcd);
+ dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd);
seq_printf(s, "- DISPC -\n");
@@ -2241,8 +3286,22 @@ void dispc_dump_clocks(struct seq_file *s)
"dss1_alwon_fclk" : "dsi1_pll_fclk");
seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
- seq_printf(s, "lck\t\t%-16lulck div\t%u\n", dispc_lclk_rate(), lcd);
- seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(), pcd);
+ seq_printf(s, "lck\t\t%-16lulck div\t%u\n", dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd);
+ seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd);
+
+#ifdef CONFIG_ARCH_OMAP4
+ dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd);
+
+ seq_printf(s, "- DISPC - LCD 2\n");
+
+ seq_printf(s, "dispc fclk source = %s\n",
+ dss_get_dispc_clk_source() == 0 ?
+ "dss1_alwon_fclk" : "dsi1_pll_fclk");
+
+ seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
+ seq_printf(s, "lck\t\t%-16lulck div\t%u\n", dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd);
+ seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
+#endif
enable_clocks(0);
}
@@ -2251,6 +3310,7 @@ void dispc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r))
+
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
DUMPREG(DISPC_REVISION);
@@ -2270,7 +3330,11 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_TIMING_H);
DUMPREG(DISPC_TIMING_V);
DUMPREG(DISPC_POL_FREQ);
+#ifdef CONFIG_ARCH_OMAP4
+ DUMPREG(DISPC_DIVISOR1);
+#else
DUMPREG(DISPC_DIVISOR);
+#endif
DUMPREG(DISPC_GLOBAL_ALPHA);
DUMPREG(DISPC_SIZE_DIG);
DUMPREG(DISPC_SIZE_LCD);
@@ -2387,13 +3451,15 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_VID_PRELOAD(0));
DUMPREG(DISPC_VID_PRELOAD(1));
+/* TODO: OMAP4: add new registers here */
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
#undef DUMPREG
}
-static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc,
- bool ihs, bool ivs, u8 acbi, u8 acb)
+static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff,
+ bool rf, bool ieo, bool ipc, bool ihs,
+ bool ivs, u8 acbi, u8 acb)
{
u32 l = 0;
@@ -2410,13 +3476,19 @@ static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc,
l |= FLD_VAL(acb, 7, 0);
enable_clocks(1);
- dispc_write_reg(DISPC_POL_FREQ, l);
+#ifdef CONFIG_ARCH_OMAP4
+ if (OMAP_DSS_CHANNEL_LCD2 == channel)
+ dispc_write_reg(DISPC_POL_FREQ2, l);
+ else
+#endif
+ dispc_write_reg(DISPC_POL_FREQ, l);
enable_clocks(0);
}
-void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb)
+void dispc_set_pol_freq(enum omap_channel ch, enum omap_panel_config config,
+ u8 acbi, u8 acb)
{
- _dispc_set_pol_freq((config & OMAP_DSS_LCD_ONOFF) != 0,
+ _dispc_set_pol_freq(ch, (config & OMAP_DSS_LCD_ONOFF) != 0,
(config & OMAP_DSS_LCD_RF) != 0,
(config & OMAP_DSS_LCD_IEO) != 0,
(config & OMAP_DSS_LCD_IPC) != 0,
@@ -2485,12 +3557,14 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
return 0;
}
-int dispc_set_clock_div(struct dispc_clock_info *cinfo)
+int dispc_set_clock_div(enum omap_channel channel,
+ struct dispc_clock_info *cinfo)
{
DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
- dispc_set_lcd_divisor(cinfo->lck_div, cinfo->pck_div);
+ dispc_set_lcd_divisor(channel, cinfo->lck_div,
+ cinfo->pck_div);
return 0;
}
@@ -2501,9 +3575,13 @@ int dispc_get_clock_div(struct dispc_clock_info *cinfo)
fck = dispc_fclk_rate();
+#ifdef CONFIG_ARCH_OMAP4
+ cinfo->lck_div = REG_GET(DISPC_DIVISOR1, 23, 16);
+ cinfo->pck_div = REG_GET(DISPC_DIVISOR1, 7, 0);
+#else
cinfo->lck_div = REG_GET(DISPC_DIVISOR, 23, 16);
cinfo->pck_div = REG_GET(DISPC_DIVISOR, 7, 0);
-
+#endif
cinfo->lck = fck / cinfo->lck_div;
cinfo->pck = cinfo->lck / cinfo->pck_div;
@@ -2722,7 +3800,9 @@ static void dispc_error_worker(struct work_struct *work)
spin_unlock_irqrestore(&dispc.irq_lock, flags);
if (errors & DISPC_IRQ_GFX_FIFO_UNDERFLOW) {
- DSSERR("GFX_FIFO_UNDERFLOW, disabling GFX\n");
+/* In HS mode, just report the error; don't close the pipeline */
+ DSSERR("GFX_FIFO_UNDERFLOW, not disabling GFX\n");
+#if 0
for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
@@ -2737,6 +3817,7 @@ static void dispc_error_worker(struct work_struct *work)
break;
}
}
+#endif
}
if (errors & DISPC_IRQ_VID1_FIFO_UNDERFLOW) {
@@ -2774,6 +3855,67 @@ static void dispc_error_worker(struct work_struct *work)
}
}
}
+#ifdef CONFIG_ARCH_OMAP4
+ if (errors & DISPC_IRQ_VID3_FIFO_UNDERFLOW) {
+ DSSERR("VID3_FIFO_UNDERFLOW, disabling VID2\n");
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ if (ovl->id == 3) {
+ dispc_enable_plane(ovl->id, 0);
+ dispc_go(ovl->manager->id);
+ mdelay(50);
+ break;
+ }
+ }
+ }
+
+ if (errors & DISPC_IRQ_SYNC_LOST_2) {
+ struct omap_overlay_manager *manager = NULL;
+ bool enable = false;
+
+ DSSERR("SYNC_LOST for LCD2, disabling LCD2\n");
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ struct omap_overlay_manager *mgr;
+ mgr = omap_dss_get_overlay_manager(i);
+#ifdef CONFIG_ARCH_OMAP4
+ if (mgr) {
+ if (mgr->id == OMAP_DSS_CHANNEL_LCD2) {
+ manager = mgr;
+ enable = mgr->device->state ==
+ OMAP_DSS_DISPLAY_ACTIVE;
+ mgr->device->disable(mgr->device);
+ break;
+ }
+ }
+#endif
+ }
+
+ if (manager) {
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ if (ovl->id != 0 && ovl->manager == manager)
+ dispc_enable_plane(ovl->id, 0);
+ }
+
+ dispc_go(manager->id);
+ mdelay(50);
+ if (enable)
+ manager->device->enable(manager->device);
+ }
+ }
+
+#endif
if (errors & DISPC_IRQ_SYNC_LOST) {
struct omap_overlay_manager *manager = NULL;
@@ -2814,9 +3956,13 @@ static void dispc_error_worker(struct work_struct *work)
}
if (errors & DISPC_IRQ_SYNC_LOST_DIGIT) {
+#if 0
struct omap_overlay_manager *manager = NULL;
bool enable = false;
+#endif
+ DSSERR("SYNC_LOST_DIGIT\n");
+ /*
DSSERR("SYNC_LOST_DIGIT, disabling TV\n");
for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
@@ -2849,6 +3995,7 @@ static void dispc_error_worker(struct work_struct *work)
if (enable)
manager->device->enable(manager->device);
}
+ */
}
if (errors & DISPC_IRQ_OCP_ERR) {
@@ -2929,12 +4076,24 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
}
#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
-void dispc_fake_vsync_irq(void)
+void dispc_fake_vsync_irq(int disp_id)
{
- u32 irqstatus = DISPC_IRQ_VSYNC;
+ u32 irqstatus;
int i;
local_irq_disable();
+ switch (disp_id) {
+ case 0:
+ irqstatus = DISPC_IRQ_VSYNC;
+ break;
+ case 1:
+ irqstatus = DISPC_IRQ_VSYNC2;
+ break;
+ default:
+ DSSERR("Invalid display id for fake vsync\n");
+ local_irq_enable();
+ return;
+ }
for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
struct omap_dispc_isr_data *isr_data;
@@ -2991,8 +4150,10 @@ static void _omap_dispc_initial_config(void)
l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
dispc_write_reg(DISPC_SYSCONFIG, l);
- /* FUNCGATED */
- REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
+ if (!cpu_is_omap44xx()) {
+ /* FUNCGATED: changed bitfield in OMAP4 */
+ REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
+ }
/* L3 firewall setting: enable access to OCM RAM */
/* XXX this should be somewhere in plat-omap */
@@ -3014,7 +4175,7 @@ int dispc_init(void)
INIT_WORK(&dispc.error_work, dispc_error_worker);
- dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
+ dispc_base = dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
if (!dispc.base) {
DSSERR("can't ioremap DISPC\n");
return -ENOMEM;
@@ -3061,7 +4222,12 @@ int dispc_setup_plane(enum omap_plane plane,
enum omap_color_mode color_mode,
bool ilace,
enum omap_dss_rotation_type rotation_type,
- u8 rotation, bool mirror, u8 global_alpha)
+ u8 rotation, bool mirror, u8 global_alpha,
+ enum omap_channel channel
+#ifdef CONFIG_ARCH_OMAP4
+ , u32 puv_addr
+#endif
+ )
{
int r = 0;
@@ -3083,9 +4249,18 @@ int dispc_setup_plane(enum omap_plane plane,
color_mode, ilace,
rotation_type,
rotation, mirror,
- global_alpha);
+ global_alpha, channel
+#ifdef CONFIG_ARCH_OMAP4
+ , puv_addr
+#endif
+ );
enable_clocks(0);
return r;
}
+void test_out(bool enable)
+{
+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
+}
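
Note: the dispc.c changes above follow one recurring pattern: every helper that used to touch a single
LCD register now takes an enum omap_channel and picks between the LCD and LCD2 register instances
(DISPC_CONTROL vs DISPC_CONTROL2, DISPC_DIVISOR1 vs DISPC_DIVISOR2, DISPC_TIMING_H vs DISPC_TIMING_H2,
and so on), then applies the same FLD_MOD-style bit update. The stand-alone sketch below models that
selection outside the kernel; the register offsets, the dispc_divisor_reg() helper and the main()
driver are illustrative only and are not part of the patch, and the bit-field macros are merely
modeled on the FLD_MOD/FLD_VAL helpers the DSS code uses.

#include <stdio.h>
#include <stdint.h>

/* Bit-field helpers modeled on FLD_MOD/FLD_VAL (start = high bit, end = low bit). */
#define FLD_MASK(start, end)	(((1u << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((uint32_t)(val) << (end)) & FLD_MASK(start, end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

enum omap_channel { OMAP_DSS_CHANNEL_LCD, OMAP_DSS_CHANNEL_DIGIT, OMAP_DSS_CHANNEL_LCD2 };

/* Illustrative offsets only; the real values come from the DISPC register header. */
#define DISPC_DIVISOR1	0x0070
#define DISPC_DIVISOR2	0x03F0

/* Pick the per-channel register instance, as the patched helpers do. */
static uint32_t dispc_divisor_reg(enum omap_channel ch)
{
	return ch == OMAP_DSS_CHANNEL_LCD2 ? DISPC_DIVISOR2 : DISPC_DIVISOR1;
}

int main(void)
{
	uint32_t reg = 0;

	/* lck_div in bits 23:16, pck_div in bits 7:0, as in dispc_set_lcd_divisor(). */
	reg = FLD_MOD(reg, 1, 23, 16);
	reg = FLD_MOD(reg, 4, 7, 0);
	printf("channel LCD2 -> offset 0x%04x, value 0x%08x\n",
	       (unsigned)dispc_divisor_reg(OMAP_DSS_CHANNEL_LCD2), (unsigned)reg);
	return 0;
}
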
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 3b92b84b9560..4b7bf97d84d2 100644..100755
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -29,6 +29,8 @@
#include <linux/platform_device.h>
#include <plat/display.h>
+#include <plat/cpu.h>
+
#include "dss.h"
static LIST_HEAD(display_list);
@@ -39,7 +41,7 @@ static ssize_t display_enabled_show(struct device *dev,
struct omap_dss_device *dssdev = to_dss_device(dev);
bool enabled = dssdev->state != OMAP_DSS_DISPLAY_DISABLED;
- return snprintf(buf, PAGE_SIZE, "%d\n", enabled);
+ return snprintf(buf, PAGE_SIZE, "%s\n", enabled? "true" : "false");
}
static ssize_t display_enabled_store(struct device *dev,
@@ -316,8 +318,11 @@ void default_get_overlay_fifo_thresholds(enum omap_plane plane,
{
unsigned burst_size_bytes;
- *burst_size = OMAP_DSS_BURST_16x32;
- burst_size_bytes = 16 * 32 / 8;
+ *burst_size = OMAP_DSS_BURST_16x32; /* OMAP4: same as 8x128 */
+ if (!cpu_is_omap44xx())
+ burst_size_bytes = 16 * 32 / 8;
+ else
+ burst_size_bytes = 8 * 128 / 8; /* OMAP4: highest burst size is 8x128 */
*fifo_high = fifo_size - 1;
*fifo_low = fifo_size - burst_size_bytes;
@@ -330,6 +335,10 @@ static int default_wait_vsync(struct omap_dss_device *dssdev)
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
irq = DISPC_IRQ_EVSYNC_ODD;
+ else if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
+ irq = DISPC_IRQ_EVSYNC_EVEN;
+ else if (dssdev->type == OMAP_DISPLAY_TYPE_DSI)
+ irq = DISPC_IRQ_FRAMEDONE;
else
irq = DISPC_IRQ_VSYNC;
@@ -356,7 +365,7 @@ static int default_get_recommended_bpp(struct omap_dss_device *dssdev)
return 16;
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_SDI:
- return 24;
+ case OMAP_DISPLAY_TYPE_HDMI:
return 24;
default:
BUG();
@@ -378,6 +387,9 @@ bool dss_use_replication(struct omap_dss_device *dssdev,
(dssdev->panel.config & OMAP_DSS_LCD_TFT) == 0)
return false;
+ if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
+ return false;
+
switch (dssdev->type) {
case OMAP_DISPLAY_TYPE_DPI:
bpp = dssdev->phy.dpi.data_lines;
@@ -418,6 +430,9 @@ void dss_init_device(struct platform_device *pdev,
#ifdef CONFIG_OMAP2_DSS_VENC
case OMAP_DISPLAY_TYPE_VENC:
#endif
+#ifdef CONFIG_OMAP2_DSS_HDMI
+ case OMAP_DISPLAY_TYPE_HDMI:
+#endif
break;
default:
DSSERR("Support for display '%s' not compiled in.\n",
@@ -453,6 +468,11 @@ void dss_init_device(struct platform_device *pdev,
r = dsi_init_display(dssdev);
break;
#endif
+#ifdef CONFIG_OMAP2_DSS_HDMI
+ case OMAP_DISPLAY_TYPE_HDMI:
+ r = hdmi_init_display(dssdev);
+ break;
+#endif
default:
BUG();
}
@@ -585,7 +605,7 @@ EXPORT_SYMBOL(omap_dss_put_device);
* of from-device is decremented. */
struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from)
{
- struct device *dev;
+ struct device *dev = NULL;
struct device *dev_start = NULL;
struct omap_dss_device *dssdev = NULL;
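
Note: the display.c hunk above changes default_get_overlay_fifo_thresholds() so that on OMAP4 the low
watermark is derived from an 8x128 burst instead of 16x32, while the high watermark stays at the last
FIFO entry. A minimal stand-alone sketch of that computation follows; the fifo_thresholds() wrapper,
the is_omap44xx flag and the 1024-entry FIFO in main() are stand-ins for the real driver state, not
part of the patch.

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the threshold computation: high watermark is the last FIFO entry,
 * low watermark leaves room for exactly one burst. */
static void fifo_thresholds(unsigned fifo_size, bool is_omap44xx,
			    unsigned *fifo_low, unsigned *fifo_high)
{
	unsigned burst_size_bytes;

	if (!is_omap44xx)
		burst_size_bytes = 16 * 32 / 8;	/* 16x32-bit burst */
	else
		burst_size_bytes = 8 * 128 / 8;	/* OMAP4: 8x128-bit burst */

	*fifo_high = fifo_size - 1;
	*fifo_low = fifo_size - burst_size_bytes;
}

int main(void)
{
	unsigned low, high;

	fifo_thresholds(1024, true, &low, &high);	/* hypothetical 1024-entry FIFO */
	printf("OMAP4: low %u, high %u\n", low, high);
	return 0;
}
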
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 2d71031baa25..4c5b6dd0636c 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -27,35 +27,54 @@
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/io.h>
+#include <plat/board.h>
#include <plat/display.h>
#include <plat/cpu.h>
#include "dss.h"
+#define DPI2_BASE 0x58005000
+void __iomem *dpi2_base;
+
+
static struct {
int update_enabled;
} dpi;
+
+/* TODO: OMAP4: check the clock divisor mechanism? */
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
-static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
+static int dpi_set_dsi_clk(int lcd_channel_ix, bool is_tft, unsigned long pck_req,
unsigned long *fck, int *lck_div, int *pck_div)
{
struct dsi_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
int r;
-
- r = dsi_pll_calc_clock_div_pck(is_tft, pck_req, &dsi_cinfo,
+ printk("DPI set dsi clk");
+ if (!cpu_is_omap44xx()) {
+ r = dsi_pll_calc_clock_div_pck(lcd_channel_ix, is_tft, pck_req, &dsi_cinfo,
&dispc_cinfo);
if (r)
return r;
-
- r = dsi_pll_set_clock_div(&dsi_cinfo);
+ } else {
+ dispc_cinfo.lck_div = 1;
+ dispc_cinfo.pck_div = 4;
+ dsi_cinfo.regn = 19;
+ dsi_cinfo.regm = 150;
+ dsi_cinfo.regm3 = 4;
+ dsi_cinfo.regm4 = 4;
+ dsi_cinfo.use_dss2_fck = true;
+ dsi_cinfo.highfreq = 0;
+ dsi_calc_clock_rates(&dsi_cinfo);
+ }
+ r = dsi_pll_set_clock_div(lcd_channel_ix, &dsi_cinfo);
if (r)
return r;
- dss_select_clk_source(0, 1);
+ dss_select_clk_source_dsi(lcd_channel_ix, 1, 1);
- r = dispc_set_clock_div(&dispc_cinfo);
+ r = dispc_set_clock_div(lcd_channel_ix, &dispc_cinfo);
if (r)
return r;
@@ -66,13 +85,15 @@ static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
return 0;
}
#else
-static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req,
+static int dpi_set_dispc_clk(int lcd_channel_ix, bool is_tft, unsigned long pck_req,
unsigned long *fck, int *lck_div, int *pck_div)
{
struct dss_clock_info dss_cinfo;
struct dispc_clock_info dispc_cinfo;
int r;
-
+printk("dpi set dispc clk");
+/*OMAP4: check this later?*/
+ return 0;
r = dss_calc_clock_div(is_tft, pck_req, &dss_cinfo, &dispc_cinfo);
if (r)
return r;
@@ -81,7 +102,7 @@ static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req,
if (r)
return r;
- r = dispc_set_clock_div(&dispc_cinfo);
+ r = dispc_set_clock_div(lcd_channel_ix, &dispc_cinfo);
if (r)
return r;
@@ -100,26 +121,39 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
unsigned long fck;
unsigned long pck;
bool is_tft;
- int r = 0;
+ int r = 0, lcd_channel_ix = 0;
+
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ lcd_channel_ix = 1;
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
- dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
- dssdev->panel.acb);
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_set_pol_freq(OMAP_DSS_CHANNEL_LCD2, dssdev->panel.config,
+ dssdev->panel.acbi, dssdev->panel.acb);
+ else
+ dispc_set_pol_freq(OMAP_DSS_CHANNEL_LCD, dssdev->panel.config,
+ dssdev->panel.acbi, dssdev->panel.acb);
+
is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
+
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- r = dpi_set_dsi_clk(is_tft, t->pixel_clock * 1000,
+ r = dpi_set_dsi_clk(lcd_channel_ix, is_tft, t->pixel_clock * 1000,
&fck, &lck_div, &pck_div);
#else
- r = dpi_set_dispc_clk(is_tft, t->pixel_clock * 1000,
+ r = dpi_set_dispc_clk(lcd_channel_ix, is_tft, t->pixel_clock * 1000,
&fck, &lck_div, &pck_div);
#endif
if (r)
goto err0;
+#ifndef CONFIG_ARCH_OMAP4
pck = fck / lck_div / pck_div / 1000;
+#else
+ pck = 0;
+#endif
if (pck != t->pixel_clock) {
DSSWARN("Could not find exact pixel clock. "
@@ -129,8 +163,15 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
t->pixel_clock = pck;
}
- dispc_set_lcd_timings(t);
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_set_lcd_timings(OMAP_DSS_CHANNEL_LCD2, t);
+ else
+ dispc_set_lcd_timings(OMAP_DSS_CHANNEL_LCD, t);
+err2:
+ dss_select_clk_source_dsi(lcd_channel_ix, false, false);
+err1:
+ dsi_pll_uninit(lcd_channel_ix);
err0:
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
return r;
@@ -142,17 +183,52 @@ static int dpi_basic_init(struct omap_dss_device *dssdev)
is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
- dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
- dispc_set_lcd_display_type(is_tft ? OMAP_DSS_LCD_DISPLAY_TFT :
- OMAP_DSS_LCD_DISPLAY_STN);
- dispc_set_tft_data_lines(dssdev->phy.dpi.data_lines);
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
+ dispc_set_parallel_interface_mode(OMAP_DSS_CHANNEL_LCD2,
+ OMAP_DSS_PARALLELMODE_BYPASS);
+ dispc_set_lcd_display_type(OMAP_DSS_CHANNEL_LCD2,
+ is_tft ? OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
+ dispc_set_tft_data_lines(OMAP_DSS_CHANNEL_LCD2,
+ dssdev->phy.dpi.data_lines);
+ } else {
+ dispc_set_parallel_interface_mode(OMAP_DSS_CHANNEL_LCD,
+ OMAP_DSS_PARALLELMODE_BYPASS);
+ dispc_set_lcd_display_type(OMAP_DSS_CHANNEL_LCD,
+ is_tft ? OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
+ dispc_set_tft_data_lines(OMAP_DSS_CHANNEL_LCD,
+ dssdev->phy.dpi.data_lines);
+ }
return 0;
}
+/* This is needed to mark the overlay info dirty */
+static void dpi_start_auto_update(struct omap_dss_device *dssdev)
+{
+ int i;
+ DSSDBG("starting auto update\n");
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+ if (ovl->manager == dssdev->manager)
+ ovl->info_dirty = true;
+ printk(KERN_ERR "ovl[%d]->manager = %s", i, ovl->manager->name);
+ }
+ dssdev->manager->apply(dssdev->manager);
+}
+
static int dpi_display_enable(struct omap_dss_device *dssdev)
{
int r;
+ int val, lcd_channel_ix = 1;
+
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
+ printk("Lcd channel index 1");
+ dpi2_base = ioremap(DPI2_BASE, 2000);
+ lcd_channel_ix = 1;
+ } else
+ lcd_channel_ix = 0;
r = omap_dss_start_device(dssdev);
if (r) {
@@ -166,15 +242,16 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
goto err1;
}
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2);
r = dpi_basic_init(dssdev);
if (r)
goto err2;
+
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- dss_clk_enable(DSS_CLK_FCK2);
- r = dsi_pll_init(dssdev, 0, 1);
+
+ r = dsi_pll_init(lcd_channel_ix, dssdev, 0, 1); /* check param 2 */
if (r)
goto err3;
#endif
@@ -184,7 +261,14 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mdelay(2);
- dispc_enable_lcd_out(1);
+ if (cpu_is_omap44xx())
+ dpi_start_auto_update(dssdev);
+
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD2, 1);
+ else
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 1);
+
r = dssdev->driver->enable(dssdev);
if (r)
@@ -195,10 +279,13 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
return 0;
err5:
- dispc_enable_lcd_out(0);
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD2, 0);
+ else
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 0);
err4:
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- dsi_pll_uninit();
+ dsi_pll_uninit(lcd_channel_ix);
err3:
dss_clk_disable(DSS_CLK_FCK2);
#endif
@@ -214,6 +301,11 @@ static int dpi_display_resume(struct omap_dss_device *dssdev);
static void dpi_display_disable(struct omap_dss_device *dssdev)
{
+ int lcd_channel_ix = 0;
+
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ lcd_channel_ix = 1;
+
if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
return;
@@ -222,11 +314,14 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
dssdev->driver->disable(dssdev);
- dispc_enable_lcd_out(0);
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD2, 0);
+ else
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 0);
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- dss_select_clk_source(0, 0);
- dsi_pll_uninit();
+ dss_select_clk_source_dsi(lcd_channel_ix, 0, 0);
+ dsi_pll_uninit(lcd_channel_ix);
dss_clk_disable(DSS_CLK_FCK2);
#endif
@@ -247,7 +342,10 @@ static int dpi_display_suspend(struct omap_dss_device *dssdev)
if (dssdev->driver->suspend)
dssdev->driver->suspend(dssdev);
- dispc_enable_lcd_out(0);
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD2, 0);
+ else
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 0);
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
@@ -265,7 +363,11 @@ static int dpi_display_resume(struct omap_dss_device *dssdev)
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
- dispc_enable_lcd_out(1);
+
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD2, 1);
+ else
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 1);
if (dssdev->driver->resume)
dssdev->driver->resume(dssdev);
@@ -282,7 +384,10 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
dssdev->panel.timings = *timings;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
dpi_set_mode(dssdev);
- dispc_go(OMAP_DSS_CHANNEL_LCD);
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_go(OMAP_DSS_CHANNEL_LCD2);
+ else
+ dispc_go(OMAP_DSS_CHANNEL_LCD);
}
}
@@ -290,11 +395,14 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
bool is_tft;
- int r;
+ int r = 0, lcd_channel_ix = 0;
int lck_div, pck_div;
unsigned long fck;
unsigned long pck;
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ lcd_channel_ix = 1;
+
if (!dispc_lcd_timings_ok(timings))
return -EINVAL;
@@ -303,11 +411,12 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
+/* TODO: OMAP4: check the clock divisor mechanism? */
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
{
struct dsi_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
- r = dsi_pll_calc_clock_div_pck(is_tft,
+ r = dsi_pll_calc_clock_div_pck(lcd_channel_ix, is_tft,
timings->pixel_clock * 1000,
&dsi_cinfo, &dispc_cinfo);
@@ -354,10 +463,19 @@ static int dpi_display_set_update_mode(struct omap_dss_device *dssdev,
return -EINVAL;
if (mode == OMAP_DSS_UPDATE_DISABLED) {
- dispc_enable_lcd_out(0);
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD2, 0);
+ else
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 0);
+
dpi.update_enabled = 0;
} else {
- dispc_enable_lcd_out(1);
+
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD2, 1);
+ else
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 1);
+
dpi.update_enabled = 1;
}
@@ -396,4 +514,3 @@ int dpi_init(void)
void dpi_exit(void)
{
}
-
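
Note: both dpi.c above and dsi.c below key everything off the same mapping: OMAP_DSS_CHANNEL_LCD2
selects the second controller instance (index 1, base 0x58005000), anything else selects the first
(base 0x58004000), and the per-controller state is then reached through a pointer such as
"p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2". The sketch below models only that selection; the
dsi_state struct and dsi_for_channel() helper are trimmed-down stand-ins for illustration, not the
driver's actual types.

#include <stdio.h>

enum omap_channel { OMAP_DSS_CHANNEL_LCD, OMAP_DSS_CHANNEL_DIGIT, OMAP_DSS_CHANNEL_LCD2 };
enum dsi { dsi1, dsi2 };

/* Trimmed-down stand-ins for the two per-controller state blocks (dsi_1/dsi_2). */
struct dsi_state { enum dsi id; unsigned long base; };

static struct dsi_state dsi_1 = { dsi1, 0x58004000 };	/* DSI_BASE  (OMAP4) */
static struct dsi_state dsi_2 = { dsi2, 0x58005000 };	/* DSI2_BASE (OMAP4) */

/* Same selection the patched helpers perform on the channel argument. */
static struct dsi_state *dsi_for_channel(enum omap_channel ch)
{
	return ch == OMAP_DSS_CHANNEL_LCD2 ? &dsi_2 : &dsi_1;
}

int main(void)
{
	struct dsi_state *p = dsi_for_channel(OMAP_DSS_CHANNEL_LCD2);

	printf("LCD2 -> DSI%d at 0x%08lx\n", p->id == dsi1 ? 1 : 2, p->base);
	return 0;
}
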
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 5936487b5def..fc15f66beba4 100644..100755
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -32,6 +32,7 @@
#include <linux/regulator/consumer.h>
#include <linux/kthread.h>
#include <linux/wait.h>
+#include <linux/i2c/twl.h>
#include <plat/display.h>
#include <plat/clock.h>
@@ -40,15 +41,24 @@
/*#define VERBOSE_IRQ*/
#define DSI_CATCH_MISSING_TE
+#undef DEBUG
+#ifndef CONFIG_ARCH_OMAP4
#define DSI_BASE 0x4804FC00
+#else
+#define DSI_BASE 0x58004000
+#define DSI2_BASE 0x58005000
+#endif
struct dsi_reg { u16 idx; };
+struct dss_reg { u16 idx; };
#define DSI_REG(idx) ((const struct dsi_reg) { idx })
+#define DSS_REG(idx) ((const struct dss_reg) { idx })
#define DSI_SZ_REGS SZ_1K
/* DSI Protocol Engine */
+#define DSS_CONTROL DSS_REG(0x0040)
#define DSI_REVISION DSI_REG(0x0000)
#define DSI_SYSCONFIG DSI_REG(0x0010)
@@ -56,6 +66,7 @@ struct dsi_reg { u16 idx; };
#define DSI_IRQSTATUS DSI_REG(0x0018)
#define DSI_IRQENABLE DSI_REG(0x001C)
#define DSI_CTRL DSI_REG(0x0040)
+#define DSI_GNQ DSI_REG(0x0044)
#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
@@ -76,6 +87,13 @@ struct dsi_reg { u16 idx; };
#define DSI_VM_TIMING6 DSI_REG(0x008C)
#define DSI_VM_TIMING7 DSI_REG(0x0090)
#define DSI_STOPCLK_TIMING DSI_REG(0x0094)
+#ifdef CONFIG_ARCH_OMAP4
+#define DSI_CTRL2 DSI_REG(0x0098)
+#define DSI_VM_TIMING8 DSI_REG(0x009C)
+#define DSI_TE_HSYNC_WIDTH(n) DSI_REG(0x00A0 + (n * 0xC))
+#define DSI_TE_VSYNC_WIDTH(n) DSI_REG(0x00A4 + (n * 0xC))
+#define DSI_TE_HSYNC_NUMBER(n) DSI_REG(0x00A8 + (n * 0xC))
+#endif
#define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
#define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
@@ -91,6 +109,12 @@ struct dsi_reg { u16 idx; };
#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
+#ifdef CONFIG_ARCH_OMAP4
+#define DSI_DSIPHY_CFG12 DSI_REG(0x200 + 0x0030)
+#define DSI_DSIPHY_CFG14 DSI_REG(0x200 + 0x0038)
+#define DSI_DSIPHY_CFG8 DSI_REG(0x200 + 0x0020)
+#define DSI_DSIPHY_CFG9 DSI_REG(0x200 + 0x0024)
+#endif
/* DSI_PLL_CTRL_SCP */
#define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
@@ -98,12 +122,18 @@ struct dsi_reg { u16 idx; };
#define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
+#ifdef CONFIG_ARCH_OMAP4
+#define DSI_PLL_CONFIGURATION3 DSI_REG(0x300 + 0x0014)
+#define DSI_SSC_CONFIGURATION1 DSI_REG(0x300 + 0x0018)
+#define DSI_SSC_CONFIGURATION2 DSI_REG(0x300 + 0x001C)
+#define DSI_SSC_CONFIGURATION4 DSI_REG(0x300 + 0x0020)
+#endif
-#define REG_GET(idx, start, end) \
- FLD_GET(dsi_read_reg(idx), start, end)
+#define REG_GET(no, idx, start, end) \
+ FLD_GET(dsi_read_reg(no, idx), start, end)
-#define REG_FLD_MOD(idx, val, start, end) \
- dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end))
+#define REG_FLD_MOD(no, idx, val, start, end) \
+ dsi_write_reg(no, idx, FLD_MOD(dsi_read_reg(no, idx), val, start, end))
/* Global interrupts */
#define DSI_IRQ_VC0 (1 << 0)
@@ -177,12 +207,22 @@ struct dsi_reg { u16 idx; };
#define DSI_DT_RX_SHORT_READ_1 0x21
#define DSI_DT_RX_SHORT_READ_2 0x22
+#ifdef CONFIG_ARCH_OMAP4
#define FINT_MAX 2100000
#define FINT_MIN 750000
+#define REGN_MAX (1 << 8)
+#define REGM_MAX ((1 << 12) - 1)
+#define REGM3_MAX (1 << 5)
+#define REGM4_MAX (1 << 5)
+#else
+#define FINT_MAX 2500000
+#define FINT_MIN 500000
#define REGN_MAX (1 << 7)
#define REGM_MAX ((1 << 11) - 1)
#define REGM3_MAX (1 << 4)
#define REGM4_MAX (1 << 4)
+#endif
+
#define LP_DIV_MAX ((1 << 13) - 1)
enum fifo_size {
@@ -204,7 +244,7 @@ struct dsi_update_region {
struct omap_dss_device *device;
};
-static struct
+static struct dsi_struct
{
void __iomem *base;
@@ -258,24 +298,29 @@ static struct
#endif
int debug_read;
int debug_write;
-} dsi;
+} dsi_1, dsi_2;
#ifdef DEBUG
static unsigned int dsi_perf;
module_param_named(dsi_perf, dsi_perf, bool, 0644);
#endif
-static inline void dsi_write_reg(const struct dsi_reg idx, u32 val)
+
+static inline void dsi_write_reg(enum dsi lcd_ix,
+ const struct dsi_reg idx, u32 val)
{
- __raw_writel(val, dsi.base + idx.idx);
+ (lcd_ix == dsi1) ? __raw_writel(val, dsi_1.base + idx.idx) :
+ __raw_writel(val, dsi_2.base + idx.idx);
}
-static inline u32 dsi_read_reg(const struct dsi_reg idx)
+static inline u32 dsi_read_reg(enum dsi lcd_ix, const struct dsi_reg idx)
{
- return __raw_readl(dsi.base + idx.idx);
+ if (lcd_ix == dsi1)
+ return __raw_readl(dsi_1.base + idx.idx);
+ else
+ return __raw_readl(dsi_2.base + idx.idx);
}
-
void dsi_save_context(void)
{
}
@@ -284,24 +329,26 @@ void dsi_restore_context(void)
{
}
-void dsi_bus_lock(void)
+void dsi_bus_lock(enum dsi lcd_ix)
{
- mutex_lock(&dsi.bus_lock);
+ (lcd_ix == dsi1) ? mutex_lock(&dsi_1.bus_lock) :
+ mutex_lock(&dsi_2.bus_lock);
}
EXPORT_SYMBOL(dsi_bus_lock);
-void dsi_bus_unlock(void)
+void dsi_bus_unlock(enum dsi lcd_ix)
{
- mutex_unlock(&dsi.bus_lock);
+ (lcd_ix == dsi1) ? mutex_unlock(&dsi_1.bus_lock) :
+ mutex_unlock(&dsi_2.bus_lock);
}
EXPORT_SYMBOL(dsi_bus_unlock);
-static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
- int value)
+static inline int wait_for_bit_change(enum dsi lcd_ix,
+ const struct dsi_reg idx, int bitnum, int value)
{
int t = 100000;
- while (REG_GET(idx, bitnum, bitnum) != value) {
+ while (REG_GET(lcd_ix, idx, bitnum, bitnum) != value) {
if (--t == 0)
return !value;
}
@@ -310,53 +357,64 @@ static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
}
#ifdef DEBUG
-static void dsi_perf_mark_setup(void)
+static void dsi_perf_mark_setup(enum dsi lcd_ix)
{
- dsi.perf_setup_time = ktime_get();
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ p_dsi->perf_setup_time = ktime_get();
}
-static void dsi_perf_mark_start(void)
+static void dsi_perf_mark_start(enum dsi lcd_ix)
{
- dsi.perf_start_time = ktime_get();
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ p_dsi->perf_start_time = ktime_get();
}
-static void dsi_perf_mark_start_auto(void)
+static void dsi_perf_mark_start_auto(enum dsi lcd_ix)
{
- dsi.perf_measure_frames = 0;
- dsi.perf_start_time_auto = ktime_get();
-}
+ struct dsi_struct *p_dsi;
-static void dsi_perf_show(const char *name)
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ p_dsi->perf_measure_frames = 0;
+ p_dsi->perf_start_time_auto = ktime_get();
+}
+static void dsi_perf_show(enum dsi lcd_ix, const char *name)
{
ktime_t t, setup_time, trans_time;
u32 total_bytes;
u32 setup_us, trans_us, total_us;
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
if (!dsi_perf)
return;
- if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED)
+ if (p_dsi->update_mode == OMAP_DSS_UPDATE_DISABLED)
return;
t = ktime_get();
- setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time);
+ setup_time = ktime_sub(p_dsi->perf_start_time, p_dsi->perf_setup_time);
setup_us = (u32)ktime_to_us(setup_time);
if (setup_us == 0)
setup_us = 1;
- trans_time = ktime_sub(t, dsi.perf_start_time);
+ trans_time = ktime_sub(t, p_dsi->perf_start_time);
trans_us = (u32)ktime_to_us(trans_time);
if (trans_us == 0)
trans_us = 1;
total_us = setup_us + trans_us;
- total_bytes = dsi.active_update_region.w *
- dsi.active_update_region.h *
- dsi.active_update_region.device->ctrl.pixel_size / 8;
+ total_bytes = p_dsi->active_update_region.w *
+ p_dsi->active_update_region.h *
+ p_dsi->active_update_region.device->ctrl.pixel_size / 8;
- if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
+ if (p_dsi->update_mode == OMAP_DSS_UPDATE_AUTO) {
static u32 s_total_trans_us, s_total_setup_us;
static u32 s_min_trans_us = 0xffffffff, s_min_setup_us;
static u32 s_max_trans_us, s_max_setup_us;
@@ -364,7 +422,7 @@ static void dsi_perf_show(const char *name)
ktime_t total_time_auto;
u32 total_time_auto_us;
- dsi.perf_measure_frames++;
+ p_dsi->perf_measure_frames++;
if (setup_us < s_min_setup_us)
s_min_setup_us = setup_us;
@@ -382,10 +440,10 @@ static void dsi_perf_show(const char *name)
s_total_trans_us += trans_us;
- if (dsi.perf_measure_frames < numframes)
+ if (p_dsi->perf_measure_frames < numframes)
return;
- total_time_auto = ktime_sub(t, dsi.perf_start_time_auto);
+ total_time_auto = ktime_sub(t, p_dsi->perf_start_time_auto);
total_time_auto_us = (u32)ktime_to_us(total_time_auto);
printk(KERN_INFO "DSI(%s): %u fps, setup %u/%u/%u, "
@@ -405,7 +463,7 @@ static void dsi_perf_show(const char *name)
s_total_trans_us = 0;
s_min_trans_us = 0xffffffff;
s_max_trans_us = 0;
- dsi_perf_mark_start_auto();
+ dsi_perf_mark_start_auto(lcd_ix);
} else {
printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
"%u bytes, %u kbytes/sec\n",
@@ -419,10 +477,10 @@ static void dsi_perf_show(const char *name)
}
}
#else
-#define dsi_perf_mark_setup()
-#define dsi_perf_mark_start()
-#define dsi_perf_mark_start_auto()
-#define dsi_perf_show(x)
+#define dsi_perf_mark_setup(lcd_ix)
+#define dsi_perf_mark_start(lcd_ix)
+#define dsi_perf_mark_start_auto(lcd_ix)
+#define dsi_perf_show(lcd_ix, x)
#endif
static void print_irq_status(u32 status)
@@ -520,37 +578,39 @@ static void print_irq_status_cio(u32 status)
static int debug_irq;
-/* called from dss */
-void dsi_irq_handler(void)
+static irqreturn_t dsi_irq_handler(int irq, void *arg)
{
u32 irqstatus, vcstatus, ciostatus;
int i;
+ struct dsi_struct *p_dsi;
+ enum dsi lcd_ix = dsi1;
- irqstatus = dsi_read_reg(DSI_IRQSTATUS);
+ p_dsi = &dsi_1;
+ irqstatus = dsi_read_reg(lcd_ix, DSI_IRQSTATUS);
if (irqstatus & DSI_IRQ_ERROR_MASK) {
DSSERR("DSI error, irqstatus %x\n", irqstatus);
print_irq_status(irqstatus);
- spin_lock(&dsi.errors_lock);
- dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK;
- spin_unlock(&dsi.errors_lock);
+ spin_lock(&(p_dsi->errors_lock));
+ p_dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
+ spin_unlock(&(p_dsi->errors_lock));
} else if (debug_irq) {
print_irq_status(irqstatus);
}
#ifdef DSI_CATCH_MISSING_TE
if (irqstatus & DSI_IRQ_TE_TRIGGER)
- del_timer(&dsi.te_timer);
+ del_timer(&(p_dsi->te_timer));
#endif
for (i = 0; i < 4; ++i) {
if ((irqstatus & (1<<i)) == 0)
continue;
- vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+ vcstatus = dsi_read_reg(lcd_ix, DSI_VC_IRQSTATUS(i));
if (vcstatus & DSI_VC_IRQ_BTA)
- complete(&dsi.bta_completion);
+ complete(&(p_dsi->bta_completion));
if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
@@ -560,98 +620,168 @@ void dsi_irq_handler(void)
print_irq_status_vc(i, vcstatus);
}
- dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus);
+ dsi_write_reg(lcd_ix, DSI_VC_IRQSTATUS(i), vcstatus);
/* flush posted write */
- dsi_read_reg(DSI_VC_IRQSTATUS(i));
+ dsi_read_reg(lcd_ix, DSI_VC_IRQSTATUS(i));
}
if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
- ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+ ciostatus = dsi_read_reg(lcd_ix, DSI_COMPLEXIO_IRQ_STATUS);
- dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
+ dsi_write_reg(lcd_ix, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
/* flush posted write */
- dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+ dsi_read_reg(lcd_ix, DSI_COMPLEXIO_IRQ_STATUS);
DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
print_irq_status_cio(ciostatus);
}
- dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
+ dsi_write_reg(lcd_ix, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
/* flush posted write */
- dsi_read_reg(DSI_IRQSTATUS);
+ dsi_read_reg(lcd_ix, DSI_IRQSTATUS);
+
+ return IRQ_HANDLED;
}
+static irqreturn_t dsi2_irq_handler(int irq, void *arg)
+{
+ u32 irqstatus, vcstatus, ciostatus;
+ int i;
+ struct dsi_struct *p_dsi;
+ enum dsi lcd_ix = dsi2;
+
+ p_dsi = &dsi_2;
+ irqstatus = dsi_read_reg(lcd_ix, DSI_IRQSTATUS);
-static void _dsi_initialize_irq(void)
+ if (irqstatus & DSI_IRQ_ERROR_MASK) {
+ DSSERR("DSI error, irqstatus %x\n", irqstatus);
+ print_irq_status(irqstatus);
+ spin_lock(&(p_dsi->errors_lock));
+ p_dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
+ spin_unlock(&(p_dsi->errors_lock));
+ } else if (debug_irq) {
+ print_irq_status(irqstatus);
+ }
+
+#ifdef DSI_CATCH_MISSING_TE
+ if (irqstatus & DSI_IRQ_TE_TRIGGER)
+ del_timer(&(p_dsi->te_timer));
+#endif
+
+ for (i = 0; i < 4; ++i) {
+ if ((irqstatus & (1<<i)) == 0)
+ continue;
+
+ vcstatus = dsi_read_reg(lcd_ix, DSI_VC_IRQSTATUS(i));
+
+ if (vcstatus & DSI_VC_IRQ_BTA)
+ complete(&(p_dsi->bta_completion));
+
+ if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
+ DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
+ i, vcstatus);
+ print_irq_status_vc(i, vcstatus);
+ } else if (debug_irq) {
+ print_irq_status_vc(i, vcstatus);
+ }
+
+ dsi_write_reg(lcd_ix, DSI_VC_IRQSTATUS(i), vcstatus);
+ /* flush posted write */
+ dsi_read_reg(lcd_ix, DSI_VC_IRQSTATUS(i));
+ }
+
+ if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
+ ciostatus = dsi_read_reg(lcd_ix, DSI_COMPLEXIO_IRQ_STATUS);
+
+ dsi_write_reg(lcd_ix, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
+ /* flush posted write */
+ dsi_read_reg(lcd_ix, DSI_COMPLEXIO_IRQ_STATUS);
+
+ DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
+ print_irq_status_cio(ciostatus);
+ }
+
+ dsi_write_reg(lcd_ix, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
+ /* flush posted write */
+ dsi_read_reg(lcd_ix, DSI_IRQSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void _dsi_initialize_irq(enum dsi lcd_ix)
{
u32 l;
int i;
/* disable all interrupts */
- dsi_write_reg(DSI_IRQENABLE, 0);
+ dsi_write_reg(lcd_ix, DSI_IRQENABLE, 0);
for (i = 0; i < 4; ++i)
- dsi_write_reg(DSI_VC_IRQENABLE(i), 0);
- dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, 0);
+ dsi_write_reg(lcd_ix, DSI_VC_IRQENABLE(i), 0);
+ dsi_write_reg(lcd_ix, DSI_COMPLEXIO_IRQ_ENABLE, 0);
/* clear interrupt status */
- l = dsi_read_reg(DSI_IRQSTATUS);
- dsi_write_reg(DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
+ l = dsi_read_reg(lcd_ix, DSI_IRQSTATUS);
+ dsi_write_reg(lcd_ix, DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
for (i = 0; i < 4; ++i) {
- l = dsi_read_reg(DSI_VC_IRQSTATUS(i));
- dsi_write_reg(DSI_VC_IRQSTATUS(i), l);
+ l = dsi_read_reg(lcd_ix, DSI_VC_IRQSTATUS(i));
+ dsi_write_reg(lcd_ix, DSI_VC_IRQSTATUS(i), l);
}
- l = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
- dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, l);
+ l = dsi_read_reg(lcd_ix, DSI_COMPLEXIO_IRQ_STATUS);
+ dsi_write_reg(lcd_ix, DSI_COMPLEXIO_IRQ_STATUS, l);
/* enable error irqs */
l = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
l |= DSI_IRQ_TE_TRIGGER;
#endif
- dsi_write_reg(DSI_IRQENABLE, l);
+ dsi_write_reg(lcd_ix, DSI_IRQENABLE, l);
l = DSI_VC_IRQ_ERROR_MASK;
for (i = 0; i < 4; ++i)
- dsi_write_reg(DSI_VC_IRQENABLE(i), l);
+ dsi_write_reg(lcd_ix, DSI_VC_IRQENABLE(i), l);
/* XXX zonda responds incorrectly, causing control error:
Exit from LP-ESC mode to LP11 uses wrong transition states on the
data lines LP0 and LN0. */
- dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE,
- -1 & (~DSI_CIO_IRQ_ERRCONTROL2));
+ dsi_write_reg(lcd_ix, DSI_COMPLEXIO_IRQ_ENABLE,
+ -1 & (~DSI_CIO_IRQ_ERRCONTROL2));
}
-static u32 dsi_get_errors(void)
+static u32 dsi_get_errors(enum dsi lcd_ix)
{
unsigned long flags;
u32 e;
- spin_lock_irqsave(&dsi.errors_lock, flags);
- e = dsi.errors;
- dsi.errors = 0;
- spin_unlock_irqrestore(&dsi.errors_lock, flags);
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
+ spin_lock_irqsave(&(p_dsi->errors_lock), flags);
+ e = p_dsi->errors;
+ p_dsi->errors = 0;
+ spin_unlock_irqrestore(&(p_dsi->errors_lock), flags);
return e;
}
-static void dsi_vc_enable_bta_irq(int channel)
+static void dsi_vc_enable_bta_irq(enum dsi lcd_ix, int channel)
{
u32 l;
- dsi_write_reg(DSI_VC_IRQSTATUS(channel), DSI_VC_IRQ_BTA);
+ dsi_write_reg(lcd_ix, DSI_VC_IRQSTATUS(channel), DSI_VC_IRQ_BTA);
- l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
+ l = dsi_read_reg(lcd_ix, DSI_VC_IRQENABLE(channel));
l |= DSI_VC_IRQ_BTA;
- dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+ dsi_write_reg(lcd_ix, DSI_VC_IRQENABLE(channel), l);
}
-static void dsi_vc_disable_bta_irq(int channel)
+static void dsi_vc_disable_bta_irq(enum dsi lcd_ix, int channel)
{
u32 l;
- l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
+ l = dsi_read_reg(lcd_ix, DSI_VC_IRQENABLE(channel));
l &= ~DSI_VC_IRQ_BTA;
- dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+ dsi_write_reg(lcd_ix, DSI_VC_IRQENABLE(channel), l);
}
/* DSI func clock. this could also be DSI2_PLL_FCLK */
@@ -664,21 +794,23 @@ static inline void enable_clocks(bool enable)
}
/* source clock for DSI PLL. this could also be PCLKFREE */
-static inline void dsi_enable_pll_clock(bool enable)
+static inline void dsi_enable_pll_clock(enum dsi lcd_ix, bool enable)
{
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
if (enable)
dss_clk_enable(DSS_CLK_FCK2);
else
dss_clk_disable(DSS_CLK_FCK2);
- if (enable && dsi.pll_locked) {
- if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
+ if (enable && p_dsi->pll_locked) {
+ if (wait_for_bit_change(lcd_ix, DSI_PLL_STATUS, 1, 1) != 1)
DSSERR("cannot lock PLL when enabling clocks\n");
}
}
#ifdef DEBUG
-static void _dsi_print_reset_status(void)
+static void _dsi_print_reset_status(enum dsi lcd_ix)
{
u32 l;
@@ -688,35 +820,36 @@ static void _dsi_print_reset_status(void)
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
- l = dsi_read_reg(DSI_DSIPHY_CFG5);
+ l = dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG5);
printk(KERN_DEBUG "DSI resets: ");
- l = dsi_read_reg(DSI_PLL_STATUS);
+ l = dsi_read_reg(lcd_ix, DSI_PLL_STATUS);
printk("PLL (%d) ", FLD_GET(l, 0, 0));
- l = dsi_read_reg(DSI_COMPLEXIO_CFG1);
+ l = dsi_read_reg(lcd_ix, DSI_COMPLEXIO_CFG1);
printk("CIO (%d) ", FLD_GET(l, 29, 29));
- l = dsi_read_reg(DSI_DSIPHY_CFG5);
+ l = dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG5);
printk("PHY (%x, %d, %d, %d)\n",
FLD_GET(l, 28, 26),
FLD_GET(l, 29, 29),
FLD_GET(l, 30, 30),
FLD_GET(l, 31, 31));
+
}
#else
-#define _dsi_print_reset_status()
+#define _dsi_print_reset_status(lcd_ix)
#endif
-static inline int dsi_if_enable(bool enable)
+static inline int dsi_if_enable(enum dsi lcd_ix, bool enable)
{
- DSSDBG("dsi_if_enable(%d)\n", enable);
+ DSSDBG("dsi_if_enable(%d, %d)\n", lcd_ix, enable);
enable = enable ? 1 : 0;
- REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */
+ REG_FLD_MOD(lcd_ix, DSI_CTRL, enable, 0, 0); /* IF_EN */
- if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) {
+ if (wait_for_bit_change(lcd_ix, DSI_CTRL, 0, enable) != enable) {
DSSERR("Failed to set dsi_if_enable to %d\n", enable);
return -EIO;
}
@@ -724,22 +857,28 @@ static inline int dsi_if_enable(bool enable)
return 0;
}
-unsigned long dsi_get_dsi1_pll_rate(void)
+unsigned long dsi_get_dsi1_pll_rate(enum dsi lcd_ix)
{
- return dsi.current_cinfo.dsi1_pll_fclk;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ return p_dsi->current_cinfo.dsi1_pll_fclk;
}
-static unsigned long dsi_get_dsi2_pll_rate(void)
+static unsigned long dsi_get_dsi2_pll_rate(enum dsi lcd_ix)
{
- return dsi.current_cinfo.dsi2_pll_fclk;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ return p_dsi->current_cinfo.dsi2_pll_fclk;
}
-static unsigned long dsi_get_txbyteclkhs(void)
+static unsigned long dsi_get_txbyteclkhs(enum dsi lcd_ix)
{
- return dsi.current_cinfo.clkin4ddr / 16;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ return p_dsi->current_cinfo.clkin4ddr / 16;
}
-static unsigned long dsi_fclk_rate(void)
+static unsigned long dsi_fclk_rate(enum dsi lcd_ix)
{
unsigned long r;
@@ -748,7 +887,7 @@ static unsigned long dsi_fclk_rate(void)
r = dss_clk_get_rate(DSS_CLK_FCK1);
} else {
/* DSI FCLK source is DSI2_PLL_FCLK */
- r = dsi_get_dsi2_pll_rate();
+ r = dsi_get_dsi2_pll_rate(lcd_ix);
}
return r;
@@ -759,23 +898,28 @@ static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
unsigned long dsi_fclk;
unsigned lp_clk_div;
unsigned long lp_clk;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
lp_clk_div = dssdev->phy.dsi.div.lp_clk_div;
if (lp_clk_div == 0 || lp_clk_div > LP_DIV_MAX)
return -EINVAL;
- dsi_fclk = dsi_fclk_rate();
+ dsi_fclk = dsi_fclk_rate(lcd_ix);
lp_clk = dsi_fclk / 2 / lp_clk_div;
DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
- dsi.current_cinfo.lp_clk = lp_clk;
- dsi.current_cinfo.lp_clk_div = lp_clk_div;
+ p_dsi->current_cinfo.lp_clk = lp_clk;
+ p_dsi->current_cinfo.lp_clk_div = lp_clk_div;
- REG_FLD_MOD(DSI_CLK_CTRL, lp_clk_div, 12, 0); /* LP_CLK_DIVISOR */
+ REG_FLD_MOD(lcd_ix, DSI_CLK_CTRL, lp_clk_div,
+ 12, 0); /* LP_CLK_DIVISOR */
- REG_FLD_MOD(DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0,
+ REG_FLD_MOD(lcd_ix, DSI_CLK_CTRL, 1,
21, 21); /* LP_RX_SYNCHRO_ENABLE */
return 0;
@@ -789,14 +933,14 @@ enum dsi_pll_power_state {
DSI_PLL_POWER_ON_DIV = 0x3,
};
-static int dsi_pll_power(enum dsi_pll_power_state state)
+static int dsi_pll_power(enum dsi lcd_ix, enum dsi_pll_power_state state)
{
int t = 0;
- REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */
+ REG_FLD_MOD(lcd_ix, DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */
/* PLL_PWR_STATUS */
- while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
+ while (FLD_GET(dsi_read_reg(lcd_ix, DSI_CLK_CTRL), 29, 28) != state) {
udelay(1);
if (t++ > 1000) {
DSSERR("Failed to set DSI PLL power mode to %d\n",
@@ -809,7 +953,7 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
}
/* calculate clock rates using dividers in cinfo */
-static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
+int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
{
if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
return -EINVAL;
@@ -823,13 +967,20 @@ static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
if (cinfo->regm4 > REGM4_MAX)
return -EINVAL;
- if (cinfo->use_dss2_fck) {
+ if (cinfo->use_dss2_sys_clk) {
+ /* The SYS_CLK value is hard-coded until the
+ * HWMOD framework becomes available */
+ cinfo->clkin = 26000000;
+ cinfo->highfreq = 0;
+ } else if (cinfo->use_dss2_fck) {
+
cinfo->clkin = dss_clk_get_rate(DSS_CLK_FCK2);
/* XXX it is unclear if highfreq should be used
* with DSS2_FCK source also */
cinfo->highfreq = 0;
} else {
- cinfo->clkin = dispc_pclk_rate();
+
+ cinfo->clkin = dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD);
if (cinfo->clkin < 32000000)
cinfo->highfreq = 0;
@@ -860,8 +1011,8 @@ static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
return 0;
}
-int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
- struct dsi_clock_info *dsi_cinfo,
+int dsi_pll_calc_clock_div_pck(enum dsi lcd_ix, bool is_tft,
+ unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
struct dispc_clock_info *dispc_cinfo)
{
struct dsi_clock_info cur, best;
@@ -869,13 +1020,15 @@ int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
int min_fck_per_pck;
int match = 0;
unsigned long dss_clk_fck2;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
dss_clk_fck2 = dss_clk_get_rate(DSS_CLK_FCK2);
- if (req_pck == dsi.cache_req_pck &&
- dsi.cache_cinfo.clkin == dss_clk_fck2) {
+ if (req_pck == p_dsi->cache_req_pck &&
+ p_dsi->cache_cinfo.clkin == dss_clk_fck2) {
DSSDBG("DSI clock info found from cache\n");
- *dsi_cinfo = dsi.cache_cinfo;
+ *dsi_cinfo = p_dsi->cache_cinfo;
dispc_find_clk_divs(is_tft, req_pck, dsi_cinfo->dsi1_pll_fclk,
dispc_cinfo);
return 0;
@@ -977,39 +1130,46 @@ found:
return -EINVAL;
}
- /* DSI2_PLL_FCLK (regm4) is not used */
- best.regm4 = 0;
- best.dsi2_pll_fclk = 0;
+ /* DSI2_PLL_FCLK (regm4) is not used. Set it to something sane. */
+ best.regm4 = best.clkin4ddr / 48000000;
+ if (best.regm4 > REGM4_MAX)
+ best.regm4 = REGM4_MAX;
+ else if (best.regm4 == 0)
+ best.regm4 = 1;
+ best.dsi2_pll_fclk = best.clkin4ddr / best.regm4;
if (dsi_cinfo)
*dsi_cinfo = best;
if (dispc_cinfo)
*dispc_cinfo = best_dispc;
- dsi.cache_req_pck = req_pck;
- dsi.cache_clk_freq = 0;
- dsi.cache_cinfo = best;
+ p_dsi->cache_req_pck = req_pck;
+ p_dsi->cache_clk_freq = 0;
+ p_dsi->cache_cinfo = best;
return 0;
}
-int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
+int dsi_pll_set_clock_div(enum dsi lcd_ix, struct dsi_clock_info *cinfo)
{
int r = 0;
u32 l;
- int f;
+ int f = 0;
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
DSSDBGF();
- dsi.current_cinfo.fint = cinfo->fint;
- dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr;
- dsi.current_cinfo.dsi1_pll_fclk = cinfo->dsi1_pll_fclk;
- dsi.current_cinfo.dsi2_pll_fclk = cinfo->dsi2_pll_fclk;
+ p_dsi->current_cinfo.fint = cinfo->fint;
+ p_dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
+ p_dsi->current_cinfo.dsi1_pll_fclk = cinfo->dsi1_pll_fclk;
+ p_dsi->current_cinfo.dsi2_pll_fclk = cinfo->dsi2_pll_fclk;
- dsi.current_cinfo.regn = cinfo->regn;
- dsi.current_cinfo.regm = cinfo->regm;
- dsi.current_cinfo.regm3 = cinfo->regm3;
- dsi.current_cinfo.regm4 = cinfo->regm4;
+ p_dsi->current_cinfo.regn = cinfo->regn;
+ p_dsi->current_cinfo.regm = cinfo->regm;
+ p_dsi->current_cinfo.regm3 = cinfo->regm3;
+ p_dsi->current_cinfo.regm4 = cinfo->regm4;
DSSDBG("DSI Fint %ld\n", cinfo->fint);
@@ -1036,98 +1196,122 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
DSSDBG("regm4 = %d, dsi2_pll_fclk = %lu\n",
cinfo->regm4, cinfo->dsi2_pll_fclk);
- REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */
+ REG_FLD_MOD(lcd_ix, DSI_PLL_CONTROL, 0, 0,
+ 0); /* DSI_PLL_AUTOMODE = manual */
- l = dsi_read_reg(DSI_PLL_CONFIGURATION1);
+ l = dsi_read_reg(lcd_ix, DSI_PLL_CONFIGURATION1);
l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
- l = FLD_MOD(l, cinfo->regn - 1, 7, 1); /* DSI_PLL_REGN */
- l = FLD_MOD(l, cinfo->regm, 18, 8); /* DSI_PLL_REGM */
+ l = FLD_MOD(l, cinfo->regn - 1, (cpu_is_omap44xx()) ? 8 : 7,
+ 1); /* DSI_PLL_REGN */
+ l = FLD_MOD(l, cinfo->regm, (cpu_is_omap44xx()) ? 20 : 18,
+ (cpu_is_omap44xx()) ? 9 : 8); /* DSI_PLL_REGM */
l = FLD_MOD(l, cinfo->regm3 > 0 ? cinfo->regm3 - 1 : 0,
- 22, 19); /* DSI_CLOCK_DIV */
+ (cpu_is_omap44xx()) ? 25 : 22,
+ (cpu_is_omap44xx()) ? 21 : 19); /* M4_CLK_DIV */
l = FLD_MOD(l, cinfo->regm4 > 0 ? cinfo->regm4 - 1 : 0,
- 26, 23); /* DSIPROTO_CLOCK_DIV */
- dsi_write_reg(DSI_PLL_CONFIGURATION1, l);
-
- BUG_ON(cinfo->fint < 750000 || cinfo->fint > 2100000);
- if (cinfo->fint < 1000000)
- f = 0x3;
- else if (cinfo->fint < 1250000)
- f = 0x4;
- else if (cinfo->fint < 1500000)
- f = 0x5;
- else if (cinfo->fint < 1750000)
- f = 0x6;
- else
- f = 0x7;
+ (cpu_is_omap44xx()) ? 30 : 26,
+ (cpu_is_omap44xx()) ? 26 : 23); /* M5_CLK_DIV */
+ dsi_write_reg(lcd_ix, DSI_PLL_CONFIGURATION1, l);
+
+ BUG_ON(cinfo->fint < FINT_MIN || cinfo->fint > FINT_MAX);
+
+ if (cpu_is_omap34xx()) {
+ if (cinfo->fint < 1000000)
+ f = 0x3;
+ else if (cinfo->fint < 1250000)
+ f = 0x4;
+ else if (cinfo->fint < 1500000)
+ f = 0x5;
+ else if (cinfo->fint < 1750000)
+ f = 0x6;
+ else
+ f = 0x7;
+ }
- l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
- l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
+ l = dsi_read_reg(lcd_ix, DSI_PLL_CONFIGURATION2);
+ if (cpu_is_omap34xx())
+ l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
l = FLD_MOD(l, cinfo->use_dss2_fck ? 0 : 1,
- 11, 11); /* DSI_PLL_CLKSEL */
+ 11, 11); /* DSI_PLL_CLKSEL */
l = FLD_MOD(l, cinfo->highfreq,
- 12, 12); /* DSI_PLL_HIGHFREQ */
+ 12, 12); /* DSI_PLL_HIGHFREQ */
l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
- dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
+ if (cpu_is_omap44xx())
+ l = FLD_MOD(l, 3, 22, 21); /* DSI_REF_SEL */
+ dsi_write_reg(lcd_ix, DSI_PLL_CONFIGURATION2, l);
- REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
+ REG_FLD_MOD(lcd_ix, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
- if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) {
+ if (wait_for_bit_change(lcd_ix, DSI_PLL_GO, 0, 0) != 0) {
DSSERR("dsi pll go bit not going down.\n");
r = -EIO;
goto err;
}
- if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) {
+ if (wait_for_bit_change(lcd_ix, DSI_PLL_STATUS, 1, 1) != 1) {
DSSERR("cannot lock PLL\n");
r = -EIO;
goto err;
}
- dsi.pll_locked = 1;
+ p_dsi->pll_locked = 1;
- l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
+ l = dsi_read_reg(lcd_ix, DSI_PLL_CONFIGURATION2);
l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
- l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
+ if (cpu_is_omap34xx())
+ l = FLD_MOD(l, 0, 7, 7);/* DSI_PLL_TIGHTPHASELOCK */
l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
- l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
- l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
- l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
- l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
+ l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN / M4_CLK_EN */
+ l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN / M4_CLK_PWDN */
+ l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN / M5_CLK_EN */
+ l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN / M5_CLK_PWDN */
l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
- dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
+ if (cpu_is_omap44xx()) {
+ l = FLD_MOD(l, 0, 25, 25); /* M7_CLOCK_EN */
+ l = FLD_MOD(l, 0, 26, 26); /* M7_CLOCK_PWDN */
+ }
+ dsi_write_reg(lcd_ix, DSI_PLL_CONFIGURATION2, l);
DSSDBG("PLL config done\n");
err:
return r;
}
-int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
- bool enable_hsdiv)
+int dsi_pll_init(enum dsi lcd_ix, struct omap_dss_device *dssdev,
+ bool enable_hsclk, bool enable_hsdiv)
{
int r = 0;
enum dsi_pll_power_state pwstate;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
DSSDBG("PLL init\n");
+ REG_FLD_MOD(lcd_ix, DSI_CLK_CTRL, 1, 14, 14);
enable_clocks(1);
- dsi_enable_pll_clock(1);
+ dsi_enable_pll_clock(lcd_ix, 1);
- r = regulator_enable(dsi.vdds_dsi_reg);
- if (r)
- goto err0;
+ if (cpu_is_omap34xx()) {
+ r = regulator_enable(p_dsi->vdds_dsi_reg);
+ if (r)
+ goto err0;
+ }
/* XXX PLL does not come out of reset without this... */
- dispc_pck_free_enable(1);
- if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) {
+ if (cpu_is_omap34xx())
+ dispc_pck_free_enable(1);
+
+
+ if (wait_for_bit_change(lcd_ix, DSI_PLL_STATUS, 0, 1) != 1) {
DSSERR("PLL not coming out of reset.\n");
r = -ENODEV;
goto err1;
@@ -1135,18 +1319,24 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
/* XXX ... but if left on, we get problems when planes do not
* fill the whole display. No idea about this */
- dispc_pck_free_enable(0);
-
- if (enable_hsclk && enable_hsdiv)
- pwstate = DSI_PLL_POWER_ON_ALL;
- else if (enable_hsclk)
- pwstate = DSI_PLL_POWER_ON_HSCLK;
- else if (enable_hsdiv)
- pwstate = DSI_PLL_POWER_ON_DIV;
- else
- pwstate = DSI_PLL_POWER_OFF;
+ if (cpu_is_omap34xx()) {
+
+ dispc_pck_free_enable(0);
- r = dsi_pll_power(pwstate);
+ if (enable_hsclk && enable_hsdiv)
+ pwstate = DSI_PLL_POWER_ON_ALL;
+ else if (enable_hsclk)
+ pwstate = DSI_PLL_POWER_ON_HSCLK;
+ else if (enable_hsdiv)
+ pwstate = DSI_PLL_POWER_ON_DIV;
+ else
+ pwstate = DSI_PLL_POWER_OFF;
+
+ } else {
+ pwstate = DSI_PLL_POWER_ON_ALL;
+ }
+
+ r = dsi_pll_power(lcd_ix, pwstate);
if (r)
goto err1;
@@ -1154,33 +1344,45 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
DSSDBG("PLL init done\n");
return 0;
+
err1:
- regulator_disable(dsi.vdds_dsi_reg);
+
+ if (cpu_is_omap34xx())
+ regulator_disable(p_dsi->vdds_dsi_reg);
+
err0:
enable_clocks(0);
- dsi_enable_pll_clock(0);
+ dsi_enable_pll_clock(lcd_ix, 0);
return r;
}
-void dsi_pll_uninit(void)
+void dsi_pll_uninit(enum dsi lcd_ix)
{
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
enable_clocks(0);
- dsi_enable_pll_clock(0);
+ dsi_enable_pll_clock(lcd_ix, 0);
+
+ p_dsi->pll_locked = 0;
+ dsi_pll_power(lcd_ix, DSI_PLL_POWER_OFF);
+
+ if (cpu_is_omap34xx())
+ regulator_disable(p_dsi->vdds_dsi_reg);
- dsi.pll_locked = 0;
- dsi_pll_power(DSI_PLL_POWER_OFF);
- regulator_disable(dsi.vdds_dsi_reg);
DSSDBG("PLL uninit done\n");
}
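/*
 * Illustrative sketch: every per-controller function in this patch resolves
 * its instance with the same ternary on lcd_ix. Assuming the dsi_1/dsi_2
 * globals and enum dsi from this file, the pattern could be factored into a
 * helper (hypothetical name dsi_get):
 *
 *	static inline struct dsi_struct *dsi_get(enum dsi lcd_ix)
 *	{
 *		return (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
 *	}
 */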
-void dsi_dump_clocks(struct seq_file *s)
+void dsi_dump_clocks(enum dsi lcd_ix, struct seq_file *s)
{
int clksel;
- struct dsi_clock_info *cinfo = &dsi.current_cinfo;
+ struct dsi_struct *p_dsi;
+ struct dsi_clock_info *cinfo;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ cinfo = &p_dsi->current_cinfo;
enable_clocks(1);
- clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11);
+ clksel = REG_GET(lcd_ix, DSI_PLL_CONFIGURATION2, 11, 11);
seq_printf(s, "- DSI PLL -\n");
@@ -1209,98 +1411,99 @@ void dsi_dump_clocks(struct seq_file *s)
dss_get_dsi_clk_source() == 0 ?
"dss1_alwon_fclk" : "dsi2_pll_fclk");
- seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate());
+ seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(lcd_ix));
seq_printf(s, "DDR_CLK\t\t%lu\n",
cinfo->clkin4ddr / 4);
- seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs());
+ seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(lcd_ix));
seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
seq_printf(s, "VP_CLK\t\t%lu\n"
"VP_PCLK\t\t%lu\n",
- dispc_lclk_rate(),
- dispc_pclk_rate());
+ dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD),
+ dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD));
enable_clocks(0);
}
-void dsi_dump_regs(struct seq_file *s)
+void dsi_dump_regs(enum dsi lcd_ix, struct seq_file *s)
{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
+#define DUMPREG(n, r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(n, r))
+
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
- DUMPREG(DSI_REVISION);
- DUMPREG(DSI_SYSCONFIG);
- DUMPREG(DSI_SYSSTATUS);
- DUMPREG(DSI_IRQSTATUS);
- DUMPREG(DSI_IRQENABLE);
- DUMPREG(DSI_CTRL);
- DUMPREG(DSI_COMPLEXIO_CFG1);
- DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
- DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
- DUMPREG(DSI_CLK_CTRL);
- DUMPREG(DSI_TIMING1);
- DUMPREG(DSI_TIMING2);
- DUMPREG(DSI_VM_TIMING1);
- DUMPREG(DSI_VM_TIMING2);
- DUMPREG(DSI_VM_TIMING3);
- DUMPREG(DSI_CLK_TIMING);
- DUMPREG(DSI_TX_FIFO_VC_SIZE);
- DUMPREG(DSI_RX_FIFO_VC_SIZE);
- DUMPREG(DSI_COMPLEXIO_CFG2);
- DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
- DUMPREG(DSI_VM_TIMING4);
- DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
- DUMPREG(DSI_VM_TIMING5);
- DUMPREG(DSI_VM_TIMING6);
- DUMPREG(DSI_VM_TIMING7);
- DUMPREG(DSI_STOPCLK_TIMING);
-
- DUMPREG(DSI_VC_CTRL(0));
- DUMPREG(DSI_VC_TE(0));
- DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
- DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
- DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
- DUMPREG(DSI_VC_IRQSTATUS(0));
- DUMPREG(DSI_VC_IRQENABLE(0));
-
- DUMPREG(DSI_VC_CTRL(1));
- DUMPREG(DSI_VC_TE(1));
- DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
- DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
- DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
- DUMPREG(DSI_VC_IRQSTATUS(1));
- DUMPREG(DSI_VC_IRQENABLE(1));
-
- DUMPREG(DSI_VC_CTRL(2));
- DUMPREG(DSI_VC_TE(2));
- DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
- DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
- DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
- DUMPREG(DSI_VC_IRQSTATUS(2));
- DUMPREG(DSI_VC_IRQENABLE(2));
-
- DUMPREG(DSI_VC_CTRL(3));
- DUMPREG(DSI_VC_TE(3));
- DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
- DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
- DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
- DUMPREG(DSI_VC_IRQSTATUS(3));
- DUMPREG(DSI_VC_IRQENABLE(3));
-
- DUMPREG(DSI_DSIPHY_CFG0);
- DUMPREG(DSI_DSIPHY_CFG1);
- DUMPREG(DSI_DSIPHY_CFG2);
- DUMPREG(DSI_DSIPHY_CFG5);
-
- DUMPREG(DSI_PLL_CONTROL);
- DUMPREG(DSI_PLL_STATUS);
- DUMPREG(DSI_PLL_GO);
- DUMPREG(DSI_PLL_CONFIGURATION1);
- DUMPREG(DSI_PLL_CONFIGURATION2);
+ DUMPREG(lcd_ix, DSI_REVISION);
+ DUMPREG(lcd_ix, DSI_SYSCONFIG);
+ DUMPREG(lcd_ix, DSI_SYSSTATUS);
+ DUMPREG(lcd_ix, DSI_IRQSTATUS);
+ DUMPREG(lcd_ix, DSI_IRQENABLE);
+ DUMPREG(lcd_ix, DSI_CTRL);
+ DUMPREG(lcd_ix, DSI_COMPLEXIO_CFG1);
+ DUMPREG(lcd_ix, DSI_COMPLEXIO_IRQ_STATUS);
+ DUMPREG(lcd_ix, DSI_COMPLEXIO_IRQ_ENABLE);
+ DUMPREG(lcd_ix, DSI_CLK_CTRL);
+ DUMPREG(lcd_ix, DSI_TIMING1);
+ DUMPREG(lcd_ix, DSI_TIMING2);
+ DUMPREG(lcd_ix, DSI_VM_TIMING1);
+ DUMPREG(lcd_ix, DSI_VM_TIMING2);
+ DUMPREG(lcd_ix, DSI_VM_TIMING3);
+ DUMPREG(lcd_ix, DSI_CLK_TIMING);
+ DUMPREG(lcd_ix, DSI_TX_FIFO_VC_SIZE);
+ DUMPREG(lcd_ix, DSI_RX_FIFO_VC_SIZE);
+ DUMPREG(lcd_ix, DSI_COMPLEXIO_CFG2);
+ DUMPREG(lcd_ix, DSI_RX_FIFO_VC_FULLNESS);
+ DUMPREG(lcd_ix, DSI_VM_TIMING4);
+ DUMPREG(lcd_ix, DSI_TX_FIFO_VC_EMPTINESS);
+ DUMPREG(lcd_ix, DSI_VM_TIMING5);
+ DUMPREG(lcd_ix, DSI_VM_TIMING6);
+ DUMPREG(lcd_ix, DSI_VM_TIMING7);
+ DUMPREG(lcd_ix, DSI_STOPCLK_TIMING);
+
+ DUMPREG(lcd_ix, DSI_VC_CTRL(0));
+ DUMPREG(lcd_ix, DSI_VC_TE(0));
+ DUMPREG(lcd_ix, DSI_VC_LONG_PACKET_HEADER(0));
+ DUMPREG(lcd_ix, DSI_VC_LONG_PACKET_PAYLOAD(0));
+ DUMPREG(lcd_ix, DSI_VC_SHORT_PACKET_HEADER(0));
+ DUMPREG(lcd_ix, DSI_VC_IRQSTATUS(0));
+ DUMPREG(lcd_ix, DSI_VC_IRQENABLE(0));
+
+ DUMPREG(lcd_ix, DSI_VC_CTRL(1));
+ DUMPREG(lcd_ix, DSI_VC_TE(1));
+ DUMPREG(lcd_ix, DSI_VC_LONG_PACKET_HEADER(1));
+ DUMPREG(lcd_ix, DSI_VC_LONG_PACKET_PAYLOAD(1));
+ DUMPREG(lcd_ix, DSI_VC_SHORT_PACKET_HEADER(1));
+ DUMPREG(lcd_ix, DSI_VC_IRQSTATUS(1));
+ DUMPREG(lcd_ix, DSI_VC_IRQENABLE(1));
+
+ DUMPREG(lcd_ix, DSI_VC_CTRL(2));
+ DUMPREG(lcd_ix, DSI_VC_TE(2));
+ DUMPREG(lcd_ix, DSI_VC_LONG_PACKET_HEADER(2));
+ DUMPREG(lcd_ix, DSI_VC_LONG_PACKET_PAYLOAD(2));
+ DUMPREG(lcd_ix, DSI_VC_SHORT_PACKET_HEADER(2));
+ DUMPREG(lcd_ix, DSI_VC_IRQSTATUS(2));
+ DUMPREG(lcd_ix, DSI_VC_IRQENABLE(2));
+
+ DUMPREG(lcd_ix, DSI_VC_CTRL(3));
+ DUMPREG(lcd_ix, DSI_VC_TE(3));
+ DUMPREG(lcd_ix, DSI_VC_LONG_PACKET_HEADER(3));
+ DUMPREG(lcd_ix, DSI_VC_LONG_PACKET_PAYLOAD(3));
+ DUMPREG(lcd_ix, DSI_VC_SHORT_PACKET_HEADER(3));
+ DUMPREG(lcd_ix, DSI_VC_IRQSTATUS(3));
+ DUMPREG(lcd_ix, DSI_VC_IRQENABLE(3));
+
+ DUMPREG(lcd_ix, DSI_DSIPHY_CFG0);
+ DUMPREG(lcd_ix, DSI_DSIPHY_CFG1);
+ DUMPREG(lcd_ix, DSI_DSIPHY_CFG2);
+ DUMPREG(lcd_ix, DSI_DSIPHY_CFG5);
+
+ DUMPREG(lcd_ix, DSI_PLL_CONTROL);
+ DUMPREG(lcd_ix, DSI_PLL_STATUS);
+ DUMPREG(lcd_ix, DSI_PLL_GO);
+ DUMPREG(lcd_ix, DSI_PLL_CONFIGURATION1);
+ DUMPREG(lcd_ix, DSI_PLL_CONFIGURATION2);
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
#undef DUMPREG
@@ -1312,19 +1515,25 @@ enum dsi_complexio_power_state {
DSI_COMPLEXIO_POWER_ULPS = 0x2,
};
-static int dsi_complexio_power(enum dsi_complexio_power_state state)
+static int dsi_complexio_power(enum dsi lcd_ix,
+ enum dsi_complexio_power_state state)
{
int t = 0;
/* PWR_CMD */
- REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27);
+ REG_FLD_MOD(lcd_ix, DSI_COMPLEXIO_CFG1, state, 28, 27);
+
+ if (cpu_is_omap44xx())
+ /* bit 30 (GO) has to be set to 1 on OMAP4 for the command to take effect */
+ REG_FLD_MOD(lcd_ix, DSI_COMPLEXIO_CFG1, 1, 30, 30);
/* PWR_STATUS */
- while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
+ while (FLD_GET(dsi_read_reg(lcd_ix, DSI_COMPLEXIO_CFG1),
+ 26, 25) != state) {
udelay(1);
- if (t++ > 1000) {
+ if (t++ > 10000) {
DSSERR("failed to set complexio power state to "
- "%d\n", state);
+ "%d\n", state);
return -ENODEV;
}
}
@@ -1335,6 +1544,7 @@ static int dsi_complexio_power(enum dsi_complexio_power_state state)
static void dsi_complexio_config(struct omap_dss_device *dssdev)
{
u32 r;
+ enum dsi lcd_ix;
int clk_lane = dssdev->phy.dsi.clk_lane;
int data1_lane = dssdev->phy.dsi.data1_lane;
@@ -1343,14 +1553,15 @@ static void dsi_complexio_config(struct omap_dss_device *dssdev)
int data1_pol = dssdev->phy.dsi.data1_pol;
int data2_pol = dssdev->phy.dsi.data2_pol;
- r = dsi_read_reg(DSI_COMPLEXIO_CFG1);
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ r = dsi_read_reg(lcd_ix, DSI_COMPLEXIO_CFG1);
r = FLD_MOD(r, clk_lane, 2, 0);
r = FLD_MOD(r, clk_pol, 3, 3);
r = FLD_MOD(r, data1_lane, 6, 4);
r = FLD_MOD(r, data1_pol, 7, 7);
r = FLD_MOD(r, data2_lane, 10, 8);
r = FLD_MOD(r, data2_pol, 11, 11);
- dsi_write_reg(DSI_COMPLEXIO_CFG1, r);
+ dsi_write_reg(lcd_ix, DSI_COMPLEXIO_CFG1, r);
/* The configuration of the DSI complex I/O (number of data lanes,
position, differential order) should not be changed while
@@ -1371,130 +1582,149 @@ static void dsi_complexio_config(struct omap_dss_device *dssdev)
*/
}
-static inline unsigned ns2ddr(unsigned ns)
+static inline unsigned ns2ddr(enum dsi lcd_ix, unsigned ns)
{
+ unsigned long ddr_clk;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
/* convert time in ns to ddr ticks, rounding up */
- unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
- return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
+ ddr_clk = p_dsi->current_cinfo.clkin4ddr / 4;
+ return (ns * (ddr_clk/1000/1000) + 999) / 1000;
}
-static inline unsigned ddr2ns(unsigned ddr)
+static inline unsigned ddr2ns(enum dsi lcd_ix, unsigned ddr)
{
- unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
+ struct dsi_struct *p_dsi;
+ unsigned long ddr_clk;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ ddr_clk = p_dsi->current_cinfo.clkin4ddr / 4;
return ddr * 1000 * 1000 / (ddr_clk / 1000);
}
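/*
 * Worked example of the two conversions above, assuming an illustrative
 * clkin4ddr of 864 MHz (so ddr_clk = 216 MHz):
 *	ns2ddr(70) = (70 * 216 + 999) / 1000 = 16 DDR ticks (rounded up)
 *	ddr2ns(16) = 16 * 1000 * 1000 / 216000 = 74 ns
 */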
-static void dsi_complexio_timings(void)
+static void dsi_complexio_timings(enum dsi lcd_ix)
{
u32 r;
u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
u32 tlpx_half, tclk_trail, tclk_zero;
u32 tclk_prepare;
-
/* calculate timings */
/* 1 * DDR_CLK = 2 * UI */
/* min 40ns + 4*UI max 85ns + 6*UI */
- ths_prepare = ns2ddr(70) + 2;
+ ths_prepare = ns2ddr(lcd_ix, 70) + 2;
/* min 145ns + 10*UI */
- ths_prepare_ths_zero = ns2ddr(175) + 2;
+ ths_prepare_ths_zero = ns2ddr(lcd_ix, 175) + 2;
/* min max(8*UI, 60ns+4*UI) */
- ths_trail = ns2ddr(60) + 5;
+ ths_trail = ns2ddr(lcd_ix, 60) + 5;
/* min 100ns */
- ths_exit = ns2ddr(145);
+ ths_exit = ns2ddr(lcd_ix, 145);
/* tlpx min 50n */
- tlpx_half = ns2ddr(25);
+ tlpx_half = ns2ddr(lcd_ix, 25);
/* min 60ns */
- tclk_trail = ns2ddr(60) + 2;
+ tclk_trail = ns2ddr(lcd_ix, 60) + 2;
/* min 38ns, max 95ns */
- tclk_prepare = ns2ddr(65);
+ tclk_prepare = ns2ddr(lcd_ix, 65);
/* min tclk-prepare + tclk-zero = 300ns */
- tclk_zero = ns2ddr(260);
+ tclk_zero = ns2ddr(lcd_ix, 260);
DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
- ths_prepare, ddr2ns(ths_prepare),
- ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero));
+ ths_prepare, ddr2ns(lcd_ix, ths_prepare),
+ ths_prepare_ths_zero, ddr2ns(lcd_ix, ths_prepare_ths_zero));
DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
- ths_trail, ddr2ns(ths_trail),
- ths_exit, ddr2ns(ths_exit));
+ ths_trail, ddr2ns(lcd_ix, ths_trail),
+ ths_exit, ddr2ns(lcd_ix, ths_exit));
DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
"tclk_zero %u (%uns)\n",
- tlpx_half, ddr2ns(tlpx_half),
- tclk_trail, ddr2ns(tclk_trail),
- tclk_zero, ddr2ns(tclk_zero));
+ tlpx_half, ddr2ns(lcd_ix, tlpx_half),
+ tclk_trail, ddr2ns(lcd_ix, tclk_trail),
+ tclk_zero, ddr2ns(lcd_ix, tclk_zero));
DSSDBG("tclk_prepare %u (%uns)\n",
- tclk_prepare, ddr2ns(tclk_prepare));
+ tclk_prepare, ddr2ns(lcd_ix, tclk_prepare));
/* program timings */
-
- r = dsi_read_reg(DSI_DSIPHY_CFG0);
+ r = dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG0);
r = FLD_MOD(r, ths_prepare, 31, 24);
r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
r = FLD_MOD(r, ths_trail, 15, 8);
r = FLD_MOD(r, ths_exit, 7, 0);
- dsi_write_reg(DSI_DSIPHY_CFG0, r);
+ dsi_write_reg(lcd_ix, DSI_DSIPHY_CFG0, r);
- r = dsi_read_reg(DSI_DSIPHY_CFG1);
+ r = dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG1);
r = FLD_MOD(r, tlpx_half, 22, 16);
r = FLD_MOD(r, tclk_trail, 15, 8);
r = FLD_MOD(r, tclk_zero, 7, 0);
- dsi_write_reg(DSI_DSIPHY_CFG1, r);
+ dsi_write_reg(lcd_ix, DSI_DSIPHY_CFG1, r);
- r = dsi_read_reg(DSI_DSIPHY_CFG2);
+ r = dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG2);
r = FLD_MOD(r, tclk_prepare, 7, 0);
- dsi_write_reg(DSI_DSIPHY_CFG2, r);
+ dsi_write_reg(lcd_ix, DSI_DSIPHY_CFG2, r);
}
-static int dsi_complexio_init(struct omap_dss_device *dssdev)
+static int dsi_complexio_init(enum dsi lcd_ix, struct omap_dss_device *dssdev)
{
int r = 0;
+ void __iomem *phymux_base = NULL;
+ unsigned int dsimux = 0xFFFFFFFF;
DSSDBG("dsi_complexio_init\n");
+ if (cpu_is_omap44xx()) {
+ phymux_base = ioremap(0x4A100000, 0x1000);
+ /* Turning on DSI PHY mux */
+ __raw_writel(dsimux, phymux_base+0x618);
+ dsimux = __raw_readl(phymux_base+0x618);
+ }
+
/* CIO_CLK_ICG, enable L3 clk to CIO */
- REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14);
+ REG_FLD_MOD(lcd_ix, DSI_CLK_CTRL, 1, 14, 14);
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
- dsi_read_reg(DSI_DSIPHY_CFG5);
+ dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG5);
- if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) {
- DSSERR("ComplexIO PHY not coming out of reset.\n");
- r = -ENODEV;
- goto err;
+ if (wait_for_bit_change(lcd_ix, DSI_DSIPHY_CFG5,
+ 30, 1) != 1) {
+ DSSERR("ComplexIO PHY not coming out of reset.\n");
+ r = -ENODEV;
+ goto err;
}
dsi_complexio_config(dssdev);
-
- r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON);
+
+ r = dsi_complexio_power(lcd_ix, DSI_COMPLEXIO_POWER_ON);
if (r)
goto err;
- if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
+ if (wait_for_bit_change(lcd_ix, DSI_COMPLEXIO_CFG1,
+ 29, 1) != 1) {
DSSERR("ComplexIO not coming out of reset.\n");
r = -ENODEV;
goto err;
}
- if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) {
- DSSERR("ComplexIO LDO power down.\n");
- r = -ENODEV;
- goto err;
+ if (cpu_is_omap34xx()) {
+ /* unknown issue on OMAP4, so this check is done only on OMAP3 */
+ if (wait_for_bit_change(lcd_ix, DSI_COMPLEXIO_CFG1,
+ 21, 1) != 1) {
+ DSSERR("ComplexIO LDO power down.\n");
+ r = -ENODEV;
+ goto err;
+ }
}
- dsi_complexio_timings();
+ dsi_complexio_timings(lcd_ix);
/*
The configuration of the DSI complex I/O (number of data lanes,
@@ -1508,28 +1738,31 @@ static int dsi_complexio_init(struct omap_dss_device *dssdev)
bit to 1. If the sequence is not followed, the DSi complex I/O
configuration is undetermined.
*/
- dsi_if_enable(1);
- dsi_if_enable(0);
- REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
- dsi_if_enable(1);
- dsi_if_enable(0);
+
+
+ dsi_if_enable(lcd_ix, 1);
+ dsi_if_enable(lcd_ix, 0);
+ REG_FLD_MOD(lcd_ix, DSI_CLK_CTRL, 1,
+ 20, 20); /* LP_CLK_ENABLE, done before now */
+ dsi_if_enable(lcd_ix, 1);
+ dsi_if_enable(lcd_ix, 0);
DSSDBG("CIO init done\n");
err:
return r;
}
-static void dsi_complexio_uninit(void)
+static void dsi_complexio_uninit(enum dsi lcd_ix)
{
- dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF);
+ dsi_complexio_power(lcd_ix, DSI_COMPLEXIO_POWER_OFF);
}
-static int _dsi_wait_reset(void)
+static int _dsi_wait_reset(enum dsi lcd_ix)
{
int i = 0;
- while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) {
- if (i++ > 5) {
+ while (REG_GET(lcd_ix, DSI_SYSSTATUS, 0, 0) == 0) {
+ if (i++ > 50) {
DSSERR("soft reset failed\n");
return -ENODEV;
}
@@ -1539,42 +1772,45 @@ static int _dsi_wait_reset(void)
return 0;
}
-static int _dsi_reset(void)
+static int _dsi_reset(enum dsi lcd_ix)
{
/* Soft reset */
- REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1);
- return _dsi_wait_reset();
+ REG_FLD_MOD(lcd_ix, DSI_SYSCONFIG, 1, 1, 1);
+ return _dsi_wait_reset(lcd_ix);
}
-static void dsi_reset_tx_fifo(int channel)
+static void dsi_reset_tx_fifo(enum dsi lcd_ix, int channel)
{
u32 mask;
u32 l;
/* set fifosize of the channel to 0, then return the old size */
- l = dsi_read_reg(DSI_TX_FIFO_VC_SIZE);
+ l = dsi_read_reg(lcd_ix, DSI_TX_FIFO_VC_SIZE);
mask = FLD_MASK((8 * channel) + 7, (8 * channel) + 4);
- dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l & ~mask);
+ dsi_write_reg(lcd_ix, DSI_TX_FIFO_VC_SIZE, l & ~mask);
- dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l);
+ dsi_write_reg(lcd_ix, DSI_TX_FIFO_VC_SIZE, l);
}
-static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
- enum fifo_size size3, enum fifo_size size4)
+static void dsi_config_tx_fifo(enum dsi lcd_ix, enum fifo_size size1,
+ enum fifo_size size2, enum fifo_size size3,
+ enum fifo_size size4)
{
u32 r = 0;
int add = 0;
int i;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- dsi.vc[0].fifo_size = size1;
- dsi.vc[1].fifo_size = size2;
- dsi.vc[2].fifo_size = size3;
- dsi.vc[3].fifo_size = size4;
+ p_dsi->vc[0].fifo_size = size1;
+ p_dsi->vc[1].fifo_size = size2;
+ p_dsi->vc[2].fifo_size = size3;
+ p_dsi->vc[3].fifo_size = size4;
for (i = 0; i < 4; i++) {
u8 v;
- int size = dsi.vc[i].fifo_size;
+ int size = p_dsi->vc[i].fifo_size;
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
@@ -1587,24 +1823,27 @@ static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
add += size;
}
- dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r);
+ dsi_write_reg(lcd_ix, DSI_TX_FIFO_VC_SIZE, r);
}
-static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2,
- enum fifo_size size3, enum fifo_size size4)
+static void dsi_config_rx_fifo(enum dsi lcd_ix, enum fifo_size size1,
+ enum fifo_size size2, enum fifo_size size3,
+ enum fifo_size size4)
{
u32 r = 0;
int add = 0;
int i;
+ struct dsi_struct *p_dsi;
- dsi.vc[0].fifo_size = size1;
- dsi.vc[1].fifo_size = size2;
- dsi.vc[2].fifo_size = size3;
- dsi.vc[3].fifo_size = size4;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ p_dsi->vc[0].fifo_size = size1;
+ p_dsi->vc[1].fifo_size = size2;
+ p_dsi->vc[2].fifo_size = size3;
+ p_dsi->vc[3].fifo_size = size4;
for (i = 0; i < 4; i++) {
u8 v;
- int size = dsi.vc[i].fifo_size;
+ int size = p_dsi->vc[i].fifo_size;
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
@@ -1617,18 +1856,18 @@ static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2,
add += size;
}
- dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r);
+ dsi_write_reg(lcd_ix, DSI_RX_FIFO_VC_SIZE, r);
}
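/*
 * Note on the FIFO units used above: each fifo_size unit corresponds to
 * 32 words of 32 bits (128 bytes), and the four virtual channels share a
 * total of 4 such units -- hence the "add + size > 4" check here and the
 * "fifo_size * 32 * 4" byte limit applied when sending long packets.
 */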
-static int dsi_force_tx_stop_mode_io(void)
+static int dsi_force_tx_stop_mode_io(enum dsi lcd_ix)
{
u32 r;
- r = dsi_read_reg(DSI_TIMING1);
+ r = dsi_read_reg(lcd_ix, DSI_TIMING1);
r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
- dsi_write_reg(DSI_TIMING1, r);
+ dsi_write_reg(lcd_ix, DSI_TIMING1, r);
- if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) {
+ if (wait_for_bit_change(lcd_ix, DSI_TIMING1, 15, 0) != 0) {
DSSERR("TX_STOP bit not going down\n");
return -EIO;
}
@@ -1636,11 +1875,11 @@ static int dsi_force_tx_stop_mode_io(void)
return 0;
}
-static void dsi_vc_print_status(int channel)
+static void dsi_vc_print_status(enum dsi lcd_ix, int channel)
{
u32 r;
- r = dsi_read_reg(DSI_VC_CTRL(channel));
+ r = dsi_read_reg(lcd_ix, DSI_VC_CTRL(channel));
DSSDBG("vc %d: TX_FIFO_NOT_EMPTY %d, BTA_EN %d, VC_BUSY %d, "
"TX_FIFO_FULL %d, RX_FIFO_NOT_EMPTY %d, ",
channel,
@@ -1650,21 +1889,25 @@ static void dsi_vc_print_status(int channel)
FLD_GET(r, 16, 16),
FLD_GET(r, 20, 20));
- r = dsi_read_reg(DSI_TX_FIFO_VC_EMPTINESS);
+ r = dsi_read_reg(lcd_ix, DSI_TX_FIFO_VC_EMPTINESS);
DSSDBG("EMPTINESS %d\n", (r >> (8 * channel)) & 0xff);
}
-static int dsi_vc_enable(int channel, bool enable)
+static int dsi_vc_enable(enum dsi lcd_ix, int channel, bool enable)
{
- if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
+ if (p_dsi->update_mode != OMAP_DSS_UPDATE_AUTO)
DSSDBG("dsi_vc_enable channel %d, enable %d\n",
channel, enable);
enable = enable ? 1 : 0;
- REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0);
+ REG_FLD_MOD(lcd_ix, DSI_VC_CTRL(channel), enable, 0, 0);
- if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) {
+ if (wait_for_bit_change(lcd_ix, DSI_VC_CTRL(channel),
+ 0, enable) != enable) {
DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
return -EIO;
}
@@ -1672,13 +1915,15 @@ static int dsi_vc_enable(int channel, bool enable)
return 0;
}
-static void dsi_vc_initial_config(int channel)
+static void dsi_vc_initial_config(enum dsi lcd_ix, int channel)
{
u32 r;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
DSSDBGF("%d", channel);
- r = dsi_read_reg(DSI_VC_CTRL(channel));
+ r = dsi_read_reg(lcd_ix, DSI_VC_CTRL(channel));
if (FLD_GET(r, 15, 15)) /* VC_BUSY */
DSSERR("VC(%d) busy when trying to configure it!\n",
@@ -1691,74 +1936,111 @@ static void dsi_vc_initial_config(int channel)
r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
-
+ if (cpu_is_omap44xx()) {
+ r = FLD_MOD(r, 3, 11, 10); /*sv5*/
+ r = FLD_MOD(r, 3, 18, 17); /*sv5*/
+ }
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
- dsi_write_reg(DSI_VC_CTRL(channel), r);
+ dsi_write_reg(lcd_ix, DSI_VC_CTRL(channel), r);
+ p_dsi->vc[channel].mode = DSI_VC_MODE_L4;
+}
+
+static void dsi_vc_initial_config_vp(enum dsi lcd_ix, int channel)
+{
+ u32 r;
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- dsi.vc[channel].mode = DSI_VC_MODE_L4;
+ DSSDBGF("%d", channel);
+
+ r = dsi_read_reg(lcd_ix, DSI_VC_CTRL(channel));
+ r = FLD_MOD(r, 1, 1, 1); /* SOURCE, 1 = video port */
+ r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
+ r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
+ r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
+ r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
+ r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
+ r = FLD_MOD(r, 1, 9, 9); /* MODE_SPEED, high speed on/off */
+ r = FLD_MOD(r, 1, 12, 12); /* RGB565_ORDER */
+ r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
+ r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
+ r = FLD_MOD(r, 1, 30, 30); /* DCS_CMD_ENABLE */
+ r = FLD_MOD(r, 0, 31, 31); /* DCS_CMD_CODE */
+ dsi_write_reg(lcd_ix, DSI_VC_CTRL(channel), r);
}
-static void dsi_vc_config_l4(int channel)
+static void dsi_vc_config_l4(enum dsi lcd_ix, int channel)
{
- if (dsi.vc[channel].mode == DSI_VC_MODE_L4)
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
+ if (p_dsi->vc[channel].mode == DSI_VC_MODE_L4)
return;
DSSDBGF("%d", channel);
- dsi_vc_enable(channel, 0);
+ dsi_vc_enable(lcd_ix, channel, 0);
- if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
+ if (REG_GET(lcd_ix, DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
DSSERR("vc(%d) busy when trying to config for L4\n", channel);
- REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
+ REG_FLD_MOD(lcd_ix, DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
- dsi_vc_enable(channel, 1);
+ dsi_vc_enable(lcd_ix, channel, 1);
- dsi.vc[channel].mode = DSI_VC_MODE_L4;
+ p_dsi->vc[channel].mode = DSI_VC_MODE_L4;
}
-static void dsi_vc_config_vp(int channel)
+static void dsi_vc_config_vp(enum dsi lcd_ix, int channel)
{
- if (dsi.vc[channel].mode == DSI_VC_MODE_VP)
- return;
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
+ if (p_dsi->vc[channel].mode == DSI_VC_MODE_VP)
+ return;
DSSDBGF("%d", channel);
- dsi_vc_enable(channel, 0);
+ dsi_vc_enable(lcd_ix, channel, 0);
- if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
- DSSERR("vc(%d) busy when trying to config for VP\n", channel);
+ if (REG_GET(lcd_ix, DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
+ DSSERR("vc(%d) busy when trying to config for VP\n",
+ channel);
- REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */
+ REG_FLD_MOD(lcd_ix, DSI_VC_CTRL(channel), 1,
+ 1, 1); /* SOURCE, 1 = video port */
- dsi_vc_enable(channel, 1);
+ dsi_vc_enable(lcd_ix, channel, 1);
- dsi.vc[channel].mode = DSI_VC_MODE_VP;
+ p_dsi->vc[channel].mode = DSI_VC_MODE_VP;
}
-static void dsi_vc_enable_hs(int channel, bool enable)
+static void dsi_vc_enable_hs(enum dsi lcd_ix, int channel, bool enable)
{
DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
- dsi_vc_enable(channel, 0);
- dsi_if_enable(0);
+ dsi_vc_enable(lcd_ix, channel, 0);
+ dsi_if_enable(lcd_ix, 0);
- REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9);
+ REG_FLD_MOD(lcd_ix, DSI_VC_CTRL(channel), enable, 9, 9);
- dsi_vc_enable(channel, 1);
- dsi_if_enable(1);
+ dsi_vc_enable(lcd_ix, channel, 1);
+ dsi_if_enable(lcd_ix, 1);
- dsi_force_tx_stop_mode_io();
+ dsi_force_tx_stop_mode_io(lcd_ix);
}
-static void dsi_vc_flush_long_data(int channel)
+static void dsi_vc_flush_long_data(enum dsi lcd_ix, int channel)
{
- while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(lcd_ix, DSI_VC_CTRL(channel), 20, 20)) {
u32 val;
- val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(lcd_ix, DSI_VC_SHORT_PACKET_HEADER(channel));
DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
(val >> 0) & 0xff,
(val >> 8) & 0xff,
@@ -1804,18 +2086,18 @@ static void dsi_show_rx_ack_with_err(u16 err)
DSSERR("\t\tDSI Protocol Violation\n");
}
-static u16 dsi_vc_flush_receive_data(int channel)
+static u16 dsi_vc_flush_receive_data(enum dsi lcd_ix, int channel)
{
/* RX_FIFO_NOT_EMPTY */
- while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(lcd_ix, DSI_VC_CTRL(channel), 20, 20)) {
u32 val;
u8 dt;
- val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
- DSSDBG("\trawval %#08x\n", val);
+ val = dsi_read_reg(lcd_ix, DSI_VC_SHORT_PACKET_HEADER(channel));
dt = FLD_GET(val, 5, 0);
if (dt == DSI_DT_RX_ACK_WITH_ERR) {
u16 err = FLD_GET(val, 23, 8);
- dsi_show_rx_ack_with_err(err);
+ if (cpu_is_omap34xx())
+ dsi_show_rx_ack_with_err(err);
} else if (dt == DSI_DT_RX_SHORT_READ_1) {
DSSDBG("\tDCS short response, 1 byte: %#x\n",
FLD_GET(val, 23, 8));
@@ -1825,7 +2107,7 @@ static u16 dsi_vc_flush_receive_data(int channel)
} else if (dt == DSI_DT_RX_DCS_LONG_READ) {
DSSDBG("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
- dsi_vc_flush_long_data(channel);
+ dsi_vc_flush_long_data(lcd_ix, channel);
} else {
DSSERR("\tunknown datatype 0x%02x\n", dt);
}
@@ -1833,114 +2115,119 @@ static u16 dsi_vc_flush_receive_data(int channel)
return 0;
}
-static int dsi_vc_send_bta(int channel)
+static int dsi_vc_send_bta(enum dsi lcd_ix, int channel)
{
- if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO &&
- (dsi.debug_write || dsi.debug_read))
- DSSDBG("dsi_vc_send_bta %d\n", channel);
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+ if (p_dsi->update_mode != OMAP_DSS_UPDATE_AUTO &&
+ (p_dsi->debug_write || p_dsi->debug_read))
+ DSSDBG("dsi_vc_send_bta %d\n", channel);
- if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */
- DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
- dsi_vc_flush_receive_data(channel);
+ WARN_ON(!mutex_is_locked(&(p_dsi->bus_lock)));
+ /* RX_FIFO_NOT_EMPTY */
+ if (REG_GET(lcd_ix, DSI_VC_CTRL(channel), 20, 20)) {
+ /* DSSERR("rx fifo not empty when sending BTA,
+ dumping data:\n"); */
+ dsi_vc_flush_receive_data(lcd_ix, channel);
}
- REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+ REG_FLD_MOD(lcd_ix, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
return 0;
}
-int dsi_vc_send_bta_sync(int channel)
+int dsi_vc_send_bta_sync(enum dsi lcd_ix, int channel)
{
int r = 0;
u32 err;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- INIT_COMPLETION(dsi.bta_completion);
+ INIT_COMPLETION(p_dsi->bta_completion);
- dsi_vc_enable_bta_irq(channel);
+ dsi_vc_enable_bta_irq(lcd_ix, channel);
- r = dsi_vc_send_bta(channel);
+ r = dsi_vc_send_bta(lcd_ix, channel);
if (r)
goto err;
- if (wait_for_completion_timeout(&dsi.bta_completion,
+ if (wait_for_completion_timeout(&(p_dsi->bta_completion),
msecs_to_jiffies(500)) == 0) {
DSSERR("Failed to receive BTA\n");
r = -EIO;
goto err;
}
- err = dsi_get_errors();
+ err = dsi_get_errors(lcd_ix);
if (err) {
DSSERR("Error while sending BTA: %x\n", err);
r = -EIO;
goto err;
}
err:
- dsi_vc_disable_bta_irq(channel);
+ dsi_vc_disable_bta_irq(lcd_ix, channel);
return r;
}
EXPORT_SYMBOL(dsi_vc_send_bta_sync);
-static inline void dsi_vc_write_long_header(int channel, u8 data_type,
- u16 len, u8 ecc)
+static inline void dsi_vc_write_long_header(enum dsi lcd_ix, int channel,
+ u8 data_type, u16 len, u8 ecc)
{
u32 val;
u8 data_id;
-
- WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ WARN_ON(!mutex_is_locked(&(p_dsi->bus_lock)));
+ ecc = 0; /*sv5*/
/*data_id = data_type | channel << 6; */
- data_id = data_type | dsi.vc[channel].dest_per << 6;
+ data_id = data_type | p_dsi->vc[channel].dest_per << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
- dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val);
+ dsi_write_reg(lcd_ix, DSI_VC_LONG_PACKET_HEADER(channel), val);
}
-static inline void dsi_vc_write_long_payload(int channel,
+static inline void dsi_vc_write_long_payload(enum dsi lcd_ix, int channel,
u8 b1, u8 b2, u8 b3, u8 b4)
{
u32 val;
val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
-
-/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
- b1, b2, b3, b4, val); */
-
- dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
+ dsi_write_reg(lcd_ix, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
}
-static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
- u8 ecc)
+static int dsi_vc_send_long(enum dsi lcd_ix,
+ int channel, u8 data_type, u8 *data, u16 len, u8 ecc)
{
- /*u32 val; */
int i;
u8 *p;
int r = 0;
u8 b1, b2, b3, b4;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- if (dsi.debug_write)
+ if (p_dsi->debug_write)
DSSDBG("dsi_vc_send_long, %d bytes\n", len);
/* len + header */
- if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) {
+ if (p_dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
DSSERR("unable to send long packet: packet too long.\n");
return -EINVAL;
}
- dsi_vc_config_l4(channel);
+ dsi_vc_config_l4(lcd_ix, channel);
- dsi_vc_write_long_header(channel, data_type, len, ecc);
+ dsi_vc_write_long_header(lcd_ix, channel, data_type, len, ecc);
/*dsi_vc_print_status(0); */
p = data;
for (i = 0; i < len >> 2; i++) {
- if (dsi.debug_write)
+ if (p_dsi->debug_write)
DSSDBG("\tsending full packet %d\n", i);
/*dsi_vc_print_status(0); */
@@ -1949,14 +2236,15 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
b3 = *p++;
b4 = *p++;
- dsi_vc_write_long_payload(channel, b1, b2, b3, b4);
+ mdelay(2+1);
+ dsi_vc_write_long_payload(lcd_ix, channel, b1, b2, b3, b4);
}
i = len % 4;
if (i) {
b1 = 0; b2 = 0; b3 = 0;
- if (dsi.debug_write)
+ if (p_dsi->debug_write)
DSSDBG("\tsending remainder bytes %d\n", i);
switch (i) {
@@ -1974,108 +2262,115 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
break;
}
- dsi_vc_write_long_payload(channel, b1, b2, b3, 0);
+ dsi_vc_write_long_payload(lcd_ix, channel, b1, b2, b3, 0);
}
return r;
}
-static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
+static int dsi_vc_send_short(enum dsi lcd_ix, int channel, u8 data_type,
+ u16 data, u8 ecc)
{
u32 r;
u8 data_id;
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+ WARN_ON(!mutex_is_locked(&(p_dsi->bus_lock)));
- if (dsi.debug_write)
+ if (p_dsi->debug_write)
DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
channel,
data_type, data & 0xff, (data >> 8) & 0xff);
- dsi_vc_config_l4(channel);
+ dsi_vc_config_l4(lcd_ix, channel);
- if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) {
- DSSERR("ERROR FIFO FULL, aborting transfer\n");
- return -EINVAL;
+ if (FLD_GET(dsi_read_reg(lcd_ix, DSI_VC_CTRL(channel)), 16, 16)) {
+ DSSERR("ERROR FIFO FULL, aborting transfer\n");
+ return -EINVAL;
}
- data_id = data_type | channel << 6;
+ data_id = data_type | p_dsi->vc[channel].dest_per << 6;
- r = (data_id << 0) | (data << 8) | (ecc << 24);
+ r = (data_id << 0) | (data << 8) | (0 << 16) | (ecc << 24);
- dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r);
+ dsi_write_reg(lcd_ix, DSI_VC_SHORT_PACKET_HEADER(channel), r);
return 0;
}
-int dsi_vc_send_null(int channel)
+int dsi_vc_send_null(enum dsi lcd_ix, int channel)
{
u8 nullpkg[] = {0, 0, 0, 0};
- return dsi_vc_send_long(0, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
+ return dsi_vc_send_long(lcd_ix, 0, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
}
EXPORT_SYMBOL(dsi_vc_send_null);
-int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len)
+int dsi_vc_dcs_write_nosync(enum dsi lcd_ix, int channel, u8 *data, int len)
{
- int r;
+ int r = 0;
BUG_ON(len == 0);
if (len == 1) {
- r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0,
+ r = dsi_vc_send_short(lcd_ix, channel, DSI_DT_DCS_SHORT_WRITE_0,
data[0], 0);
} else if (len == 2) {
- r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1,
+ r = dsi_vc_send_short(lcd_ix, channel, DSI_DT_DCS_SHORT_WRITE_1,
data[0] | (data[1] << 8), 0);
} else {
/* 0x39 = DCS Long Write */
- r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE,
+ r = dsi_vc_send_long(lcd_ix, channel, DSI_DT_DCS_LONG_WRITE,
data, len, 0);
}
return r;
}
+
EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
-int dsi_vc_dcs_write(int channel, u8 *data, int len)
+int dsi_vc_dcs_write(enum dsi lcd_ix, int channel, u8 *data, int len)
{
- int r;
+ int r = 0;
- r = dsi_vc_dcs_write_nosync(channel, data, len);
- if (r)
- return r;
+ r = dsi_vc_dcs_write_nosync(lcd_ix, channel, data, len);
- r = dsi_vc_send_bta_sync(channel);
+ /*r = dsi_vc_send_bta_sync(lcd_ix, channel);*/
return r;
}
+
EXPORT_SYMBOL(dsi_vc_dcs_write);
-int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
+int dsi_vc_dcs_read(enum dsi lcd_ix, int channel, u8 dcs_cmd,
+ u8 *buf, int buflen)
{
u32 val;
u8 dt;
int r;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- if (dsi.debug_read)
+ if (p_dsi->debug_read)
DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %u)\n", channel, dcs_cmd);
- r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
+ r = dsi_vc_send_short(lcd_ix, channel, DSI_DT_DCS_READ, dcs_cmd, 0);
if (r)
return r;
- r = dsi_vc_send_bta_sync(channel);
+ r = dsi_vc_send_bta_sync(lcd_ix, channel);
if (r)
return r;
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) {
+ if (REG_GET(lcd_ix, DSI_VC_CTRL(channel), 20, 20) == 0) {
DSSERR("RX fifo empty when trying to read.\n");
return -EIO;
}
- val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
- if (dsi.debug_read)
+ val = dsi_read_reg(lcd_ix, DSI_VC_SHORT_PACKET_HEADER(channel));
+ if (p_dsi->debug_read)
DSSDBG("\theader: %08x\n", val);
dt = FLD_GET(val, 5, 0);
if (dt == DSI_DT_RX_ACK_WITH_ERR) {
@@ -2085,7 +2380,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
} else if (dt == DSI_DT_RX_SHORT_READ_1) {
u8 data = FLD_GET(val, 15, 8);
- if (dsi.debug_read)
+ if (p_dsi->debug_read)
DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
if (buflen < 1)
@@ -2096,7 +2391,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
return 1;
} else if (dt == DSI_DT_RX_SHORT_READ_2) {
u16 data = FLD_GET(val, 23, 8);
- if (dsi.debug_read)
+ if (p_dsi->debug_read)
DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
if (buflen < 2)
@@ -2109,7 +2404,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
} else if (dt == DSI_DT_RX_DCS_LONG_READ) {
int w;
int len = FLD_GET(val, 23, 8);
- if (dsi.debug_read)
+ if (p_dsi->debug_read)
DSSDBG("\tDCS long response, len %d\n", len);
if (len > buflen)
@@ -2118,8 +2413,9 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
/* two byte checksum ends the packet, not included in len */
for (w = 0; w < len + 2;) {
int b;
- val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
- if (dsi.debug_read)
+ val = dsi_read_reg(lcd_ix,
+ DSI_VC_SHORT_PACKET_HEADER(channel));
+ if (p_dsi->debug_read)
DSSDBG("\t\t%02x %02x %02x %02x\n",
(val >> 0) & 0xff,
(val >> 8) & 0xff,
@@ -2144,31 +2440,30 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
EXPORT_SYMBOL(dsi_vc_dcs_read);
-int dsi_vc_set_max_rx_packet_size(int channel, u16 len)
+int dsi_vc_set_max_rx_packet_size(enum dsi lcd_ix, int channel, u16 len)
{
int r;
- r = dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
+ r = dsi_vc_send_short(lcd_ix, channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
len, 0);
if (r)
return r;
- r = dsi_vc_send_bta_sync(channel);
+ r = dsi_vc_send_bta_sync(lcd_ix, channel);
return r;
}
EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
-static void dsi_set_lp_rx_timeout(unsigned long ns)
+static void dsi_set_lp_rx_timeout(enum dsi lcd_ix, unsigned long ns)
{
u32 r;
- unsigned x4, x16;
- unsigned long fck;
- unsigned long ticks;
+ unsigned x4 = 0, x16 = 0;
+ unsigned long fck = 0;
+ unsigned long ticks = 0;
/* ticks in DSI_FCK */
-
- fck = dsi_fclk_rate();
+ fck = dsi_fclk_rate(lcd_ix);
ticks = (fck / 1000 / 1000) * ns / 1000;
x4 = 0;
x16 = 0;
@@ -2198,28 +2493,27 @@ static void dsi_set_lp_rx_timeout(unsigned long ns)
x16 = 1;
}
- r = dsi_read_reg(DSI_TIMING2);
+ r = dsi_read_reg(lcd_ix, DSI_TIMING2);
r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
r = FLD_MOD(r, x16, 14, 14); /* LP_RX_TO_X16 */
r = FLD_MOD(r, x4, 13, 13); /* LP_RX_TO_X4 */
r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
- dsi_write_reg(DSI_TIMING2, r);
+ dsi_write_reg(lcd_ix, DSI_TIMING2, r);
DSSDBG("LP_RX_TO %lu ns (%#lx ticks%s%s)\n",
- (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
- (fck / 1000 / 1000),
- ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+ (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
+ (fck / 1000 / 1000),
+ ticks, x4 ? " x4" : "", x16 ? " x16" : "");
}
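/*
 * Worked example of the tick computation above, assuming an illustrative
 * DSI_FCLK of 38.4 MHz and the 48000 ns LP_RX timeout programmed by
 * dsi_proto_config():
 *	ticks = (38400000 / 1000 / 1000) * 48000 / 1000 = 38 * 48 = 1824
 * which fits the 13-bit LP_RX_COUNTER field (bits 12:0), so neither the
 * x4 nor the x16 prescaler is needed.
 */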
-static void dsi_set_ta_timeout(unsigned long ns)
+static void dsi_set_ta_timeout(enum dsi lcd_ix, unsigned long ns)
{
u32 r;
- unsigned x8, x16;
- unsigned long fck;
- unsigned long ticks;
+ unsigned x8 = 0, x16 = 0;
+ unsigned long fck = 0;
+ unsigned long ticks = 0;
- /* ticks in DSI_FCK */
- fck = dsi_fclk_rate();
+ fck = dsi_fclk_rate(lcd_ix);
ticks = (fck / 1000 / 1000) * ns / 1000;
x8 = 0;
x16 = 0;
@@ -2249,29 +2543,30 @@ static void dsi_set_ta_timeout(unsigned long ns)
x16 = 1;
}
- r = dsi_read_reg(DSI_TIMING1);
- r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
+ r = dsi_read_reg(lcd_ix, DSI_TIMING1);
+
+ r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
r = FLD_MOD(r, x16, 30, 30); /* TA_TO_X16 */
- r = FLD_MOD(r, x8, 29, 29); /* TA_TO_X8 */
+ r = FLD_MOD(r, x8, 29, 29); /* TA_TO_X8 */
r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
- dsi_write_reg(DSI_TIMING1, r);
+
+ dsi_write_reg(lcd_ix, DSI_TIMING1, r);
DSSDBG("TA_TO %lu ns (%#lx ticks%s%s)\n",
- (ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) /
- (fck / 1000 / 1000),
- ticks, x8 ? " x8" : "", x16 ? " x16" : "");
+ (ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) /
+ (fck / 1000 / 1000),
+ ticks, x8 ? " x8" : "", x16 ? " x16" : "");
}
-static void dsi_set_stop_state_counter(unsigned long ns)
+static void dsi_set_stop_state_counter(enum dsi lcd_ix, unsigned long ns)
{
u32 r;
- unsigned x4, x16;
- unsigned long fck;
- unsigned long ticks;
+ unsigned x4 = 0, x16 = 0;
+ unsigned long fck = 0;
+ unsigned long ticks = 0;
/* ticks in DSI_FCK */
-
- fck = dsi_fclk_rate();
+ fck = dsi_fclk_rate(lcd_ix);
ticks = (fck / 1000 / 1000) * ns / 1000;
x4 = 0;
x16 = 0;
@@ -2302,29 +2597,27 @@ static void dsi_set_stop_state_counter(unsigned long ns)
x16 = 1;
}
- r = dsi_read_reg(DSI_TIMING1);
- r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
+ r = dsi_read_reg(lcd_ix, DSI_TIMING1);
+ r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
r = FLD_MOD(r, x16, 14, 14); /* STOP_STATE_X16_IO */
- r = FLD_MOD(r, x4, 13, 13); /* STOP_STATE_X4_IO */
+ r = FLD_MOD(r, x4, 13, 13); /* STOP_STATE_X4_IO */
r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
- dsi_write_reg(DSI_TIMING1, r);
+ dsi_write_reg(lcd_ix, DSI_TIMING1, r);
DSSDBG("STOP_STATE_COUNTER %lu ns (%#lx ticks%s%s)\n",
- (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
- (fck / 1000 / 1000),
- ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+ (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
+ (fck / 1000 / 1000),
+ ticks, x4 ? " x4" : "", x16 ? " x16" : "");
}
-static void dsi_set_hs_tx_timeout(unsigned long ns)
+static void dsi_set_hs_tx_timeout(enum dsi lcd_ix, unsigned long ns)
{
u32 r;
- unsigned x4, x16;
- unsigned long fck;
- unsigned long ticks;
+ unsigned x4 = 0, x16 = 0;
+ unsigned long fck = 0;
+ unsigned long ticks = 0;
- /* ticks in TxByteClkHS */
-
- fck = dsi_get_txbyteclkhs();
+ fck = dsi_get_txbyteclkhs(lcd_ix);
ticks = (fck / 1000 / 1000) * ns / 1000;
x4 = 0;
x16 = 0;
@@ -2354,38 +2647,43 @@ static void dsi_set_hs_tx_timeout(unsigned long ns)
x16 = 1;
}
- r = dsi_read_reg(DSI_TIMING2);
+ r = dsi_read_reg(lcd_ix, DSI_TIMING2);
r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
r = FLD_MOD(r, x16, 30, 30); /* HS_TX_TO_X16 */
r = FLD_MOD(r, x4, 29, 29); /* HS_TX_TO_X8 (4 really) */
r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
- dsi_write_reg(DSI_TIMING2, r);
+ dsi_write_reg(lcd_ix, DSI_TIMING2, r);
DSSDBG("HS_TX_TO %lu ns (%#lx ticks%s%s)\n",
- (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
- (fck / 1000 / 1000),
- ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+ (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
+ (fck / 1000 / 1000),
+ ticks, x4 ? " x4" : "", x16 ? " x16" : "");
}
static int dsi_proto_config(struct omap_dss_device *dssdev)
{
u32 r;
int buswidth = 0;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
- dsi_config_tx_fifo(DSI_FIFO_SIZE_128,
- DSI_FIFO_SIZE_0,
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
+ dsi_config_tx_fifo(lcd_ix, DSI_FIFO_SIZE_64,
+ DSI_FIFO_SIZE_64,
DSI_FIFO_SIZE_0,
DSI_FIFO_SIZE_0);
- dsi_config_rx_fifo(DSI_FIFO_SIZE_128,
- DSI_FIFO_SIZE_0,
+ dsi_config_rx_fifo(lcd_ix, DSI_FIFO_SIZE_64,
+ DSI_FIFO_SIZE_64,
DSI_FIFO_SIZE_0,
DSI_FIFO_SIZE_0);
/* XXX what values for the timeouts? */
- dsi_set_stop_state_counter(1000);
- dsi_set_ta_timeout(6400000);
- dsi_set_lp_rx_timeout(48000);
- dsi_set_hs_tx_timeout(1000000);
+ dsi_set_stop_state_counter(lcd_ix, 1000);
+ dsi_set_ta_timeout(lcd_ix, 6400000);
+ dsi_set_lp_rx_timeout(lcd_ix, 48000);
+ dsi_set_hs_tx_timeout(lcd_ix, 1000000);
switch (dssdev->ctrl.pixel_size) {
case 16:
@@ -2401,28 +2699,41 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
BUG();
}
- r = dsi_read_reg(DSI_CTRL);
- r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
- r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
+ r = dsi_read_reg(lcd_ix, DSI_CTRL);
+ r = FLD_MOD(r, (cpu_is_omap44xx()) ? 0 : 1,
+ 1, 1); /* CS_RX_EN */
+ r = FLD_MOD(r, (cpu_is_omap44xx()) ? 0 : 1,
+ 2, 2); /* ECC_RX_EN */
r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata*/
r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
+ r = FLD_MOD(r, (cpu_is_omap44xx()) ? 1 : 0,
+ 9, 9); /* VP_DE_POL */
+ r = FLD_MOD(r, (cpu_is_omap44xx()) ? 1 : 0,
+ 11, 11); /* VP_VSYNC_POL */
r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
- r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
- r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
- r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */
-
- dsi_write_reg(DSI_CTRL, r);
+ r = FLD_MOD(r, (cpu_is_omap44xx()) ? 0 : 1,
+ 19, 19); /* EOT_ENABLE */
+ if (cpu_is_omap34xx()) {
+ r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
+ r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE */
+ }
+ dsi_write_reg(lcd_ix, DSI_CTRL, r);
- dsi_vc_initial_config(0);
+ /* We use two virtual channels on OMAP4, one for
+ * all LP (low-power) operations and one for all HS operations
+ */
+ dsi_vc_initial_config(lcd_ix, 0);
+ if (cpu_is_omap44xx())
+ dsi_vc_initial_config_vp(lcd_ix, 1);
/* set all vc targets to peripheral 0 */
- dsi.vc[0].dest_per = 0;
- dsi.vc[1].dest_per = 0;
- dsi.vc[2].dest_per = 0;
- dsi.vc[3].dest_per = 0;
+ p_dsi->vc[0].dest_per = 0;
+ p_dsi->vc[1].dest_per = 0;
+ p_dsi->vc[2].dest_per = 0;
+ p_dsi->vc[3].dest_per = 0;
return 0;
}
@@ -2437,26 +2748,31 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
unsigned enter_hs_mode_lat, exit_hs_mode_lat;
unsigned ths_eot;
u32 r;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- r = dsi_read_reg(DSI_DSIPHY_CFG0);
+ r = dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG0);
ths_prepare = FLD_GET(r, 31, 24);
ths_prepare_ths_zero = FLD_GET(r, 23, 16);
ths_zero = ths_prepare_ths_zero - ths_prepare;
ths_trail = FLD_GET(r, 15, 8);
ths_exit = FLD_GET(r, 7, 0);
- r = dsi_read_reg(DSI_DSIPHY_CFG1);
+ r = dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 22, 16) * 2;
tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
- r = dsi_read_reg(DSI_DSIPHY_CFG2);
+ r = dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG2);
tclk_prepare = FLD_GET(r, 7, 0);
/* min 8*UI */
tclk_pre = 20;
/* min 60ns + 52*UI */
- tclk_post = ns2ddr(60) + 26;
+ tclk_post = ns2ddr(lcd_ix, 60) + 26;
/* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */
if (dssdev->phy.dsi.data1_lane != 0 &&
@@ -2472,10 +2788,10 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
- r = dsi_read_reg(DSI_CLK_TIMING);
+ r = dsi_read_reg(lcd_ix, DSI_CLK_TIMING);
r = FLD_MOD(r, ddr_clk_pre, 15, 8);
r = FLD_MOD(r, ddr_clk_post, 7, 0);
- dsi_write_reg(DSI_CLK_TIMING, r);
+ dsi_write_reg(lcd_ix, DSI_CLK_TIMING, r);
DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
ddr_clk_pre,
@@ -2489,7 +2805,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
FLD_VAL(exit_hs_mode_lat, 15, 0);
- dsi_write_reg(DSI_VM_TIMING7, r);
+ dsi_write_reg(lcd_ix, DSI_VM_TIMING7, r);
DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
enter_hs_mode_lat, exit_hs_mode_lat);
@@ -2499,23 +2815,23 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
#define DSI_DECL_VARS \
int __dsi_cb = 0; u32 __dsi_cv = 0;
-#define DSI_FLUSH(ch) \
+#define DSI_FLUSH(no, ch) \
if (__dsi_cb > 0) { \
/*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
- dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
+ dsi_write_reg(no, DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
__dsi_cb = __dsi_cv = 0; \
}
-#define DSI_PUSH(ch, data) \
+#define DSI_PUSH(no, ch, data) \
do { \
__dsi_cv |= (data) << (__dsi_cb * 8); \
/*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
if (++__dsi_cb > 3) \
- DSI_FLUSH(ch); \
+ DSI_FLUSH(no, ch); \
} while (0)
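/*
 * Illustrative sketch of what DSI_PUSH/DSI_FLUSH do: bytes are packed
 * LSB-first into a 32-bit word and flushed to the long-packet payload
 * register once four have accumulated. A standalone model with hypothetical
 * names, emit() standing in for the dsi_write_reg() payload write:
 */
#include <stdint.h>

struct dsi_packer {
	uint32_t word;
	int count;
};

static void packer_flush(struct dsi_packer *p, void (*emit)(uint32_t))
{
	if (p->count > 0) {
		emit(p->word);		/* DSI_VC_LONG_PACKET_PAYLOAD write */
		p->word = 0;
		p->count = 0;
	}
}

static void packer_push(struct dsi_packer *p, uint8_t byte,
			void (*emit)(uint32_t))
{
	p->word |= (uint32_t)byte << (p->count * 8);
	if (++p->count > 3)
		packer_flush(p, emit);
}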
-static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
- int x, int y, int w, int h)
+static int dsi_update_screen_l4(enum dsi lcd_ix,
+ struct omap_dss_device *dssdev, int x, int y, int w, int h)
{
/* Note: supports only 24bit colors in 32bit container */
int first = 1;
@@ -2531,6 +2847,9 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
int horiz_inc;
int current_x;
struct omap_overlay *ovl;
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
debug_irq = 0;
@@ -2556,7 +2875,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
* in fifo */
/* When using CPU, max long packet size is TX buffer size */
- max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4;
+ max_dsi_packet_size = p_dsi->vc[0].fifo_size * 32 * 4;
/* we seem to get better perf if we divide the tx fifo to half,
and while the other half is being sent, we fill the other half
@@ -2583,56 +2902,34 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
first = 0;
#if 1
- /* using fifo not empty */
- /* TX_FIFO_NOT_EMPTY */
- while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
- udelay(1);
- fifo_stalls++;
- if (fifo_stalls > 0xfffff) {
- DSSERR("fifo stalls overflow, pixels left %d\n",
- pixels_left);
- dsi_if_enable(0);
- return -EIO;
- }
- }
-#elif 1
- /* using fifo emptiness */
- while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
- max_dsi_packet_size) {
- fifo_stalls++;
- if (fifo_stalls > 0xfffff) {
- DSSERR("fifo stalls overflow, pixels left %d\n",
- pixels_left);
- dsi_if_enable(0);
- return -EIO;
- }
- }
-#else
- while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) {
- fifo_stalls++;
- if (fifo_stalls > 0xfffff) {
- DSSERR("fifo stalls overflow, pixels left %d\n",
- pixels_left);
- dsi_if_enable(0);
- return -EIO;
- }
- }
+ /* using fifo not empty */
+ /* TX_FIFO_NOT_EMPTY */
+ while (FLD_GET(dsi_read_reg(lcd_ix, DSI_VC_CTRL(0)), 5, 5)) {
+ udelay(1);
+ fifo_stalls++;
+ if (fifo_stalls > 0xfffff) {
+ DSSERR("fifo stalls overflow, pixels left %d\n",
+ pixels_left);
+ dsi_if_enable(lcd_ix, 0);
+ return -EIO;
+ }
+ }
#endif
pixels = min(max_pixels_per_packet, pixels_left);
pixels_left -= pixels;
- dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE,
+ dsi_vc_write_long_header(lcd_ix, 0, DSI_DT_DCS_LONG_WRITE,
1 + pixels * bytespp, 0);
- DSI_PUSH(0, dcs_cmd);
+ DSI_PUSH(lcd_ix, 0, dcs_cmd);
while (pixels-- > 0) {
u32 pix = __raw_readl(data++);
- DSI_PUSH(0, (pix >> 16) & 0xff);
- DSI_PUSH(0, (pix >> 8) & 0xff);
- DSI_PUSH(0, (pix >> 0) & 0xff);
+ DSI_PUSH(lcd_ix, 0, (pix >> 16) & 0xff);
+ DSI_PUSH(lcd_ix, 0, (pix >> 8) & 0xff);
+ DSI_PUSH(lcd_ix, 0, (pix >> 0) & 0xff);
current_x++;
if (current_x == x+w) {
@@ -2641,14 +2938,15 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
}
}
- DSI_FLUSH(0);
+ DSI_FLUSH(lcd_ix, 0);
}
return 0;
}
-static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
+static void dsi_update_screen_dispc(enum dsi lcd_ix,
+ struct omap_dss_device *dssdev, u16 x, u16 y,
+ u16 w, u16 h)
{
unsigned bytespp;
unsigned bytespl;
@@ -2658,15 +2956,18 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
unsigned packet_len;
u32 l;
bool use_te_trigger;
- const unsigned channel = 0;
+ const unsigned channel = 1;
/* line buffer is 1024 x 24bits */
/* XXX: for some reason using full buffer size causes considerable TX
* slowdown with update sizes that fill the whole buffer */
const unsigned line_buf_size = 1023 * 3;
+ struct dsi_struct *p_dsi;
+
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
+ use_te_trigger = p_dsi->te_enabled && !p_dsi->use_ext_te;
- if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
+ if (p_dsi->update_mode != OMAP_DSS_UPDATE_AUTO)
DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
x, y, w, h);
@@ -2689,18 +2990,19 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
total_len += (bytespf % packet_payload) + 1;
if (0)
- dsi_vc_print_status(1);
+ dsi_vc_print_status(lcd_ix, 1);
l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
- dsi_write_reg(DSI_VC_TE(channel), l);
+ dsi_write_reg(lcd_ix, DSI_VC_TE(channel), l);
- dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0);
+ dsi_vc_write_long_header(lcd_ix, channel, DSI_DT_DCS_LONG_WRITE,
+ packet_len, 0);
if (use_te_trigger)
l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
else
l = FLD_MOD(l, 1, 31, 31); /* TE_START */
- dsi_write_reg(DSI_VC_TE(channel), l);
+ dsi_write_reg(lcd_ix, DSI_VC_TE(channel), l);
/* We put SIDLEMODE to no-idle for the duration of the transfer,
* because DSS interrupts are not capable of waking up the CPU and the
@@ -2715,12 +3017,12 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
if (use_te_trigger) {
/* disable LP_RX_TO, so that we can receive TE. Time to wait
* for TE is longer than the timer allows */
- REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
+ REG_FLD_MOD(lcd_ix, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
- dsi_vc_send_bta(channel);
+ dsi_vc_send_bta(lcd_ix, 0);
#ifdef DSI_CATCH_MISSING_TE
- mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250));
+ mod_timer(&(p_dsi->te_timer), jiffies + msecs_to_jiffies(250));
#endif
}
}
@@ -2741,51 +3043,70 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
/* SIDLEMODE back to smart-idle */
dispc_enable_sidle();
+ dsi_1.framedone_received = true;
+ wake_up(&dsi_1.waitqueue);
+}
- dsi.framedone_received = true;
- wake_up(&dsi.waitqueue);
+static void dsi2_framedone_irq_callback(void *data, u32 mask)
+{
+ /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
+ * turns itself off. However, DSI still has the pixels in its buffers,
+ * and is sending the data.
+ */
+ /* SIDLEMODE back to smart-idle */
+ dispc_enable_sidle();
+ dsi_2.framedone_received = true;
+ wake_up(&dsi_2.waitqueue);
}
-static void dsi_set_update_region(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
+static void dsi_set_update_region(enum dsi lcd_ix,
+ struct omap_dss_device *dssdev, u16 x, u16 y,
+ u16 w, u16 h)
{
- spin_lock(&dsi.update_lock);
- if (dsi.update_region.dirty) {
- dsi.update_region.x = min(x, dsi.update_region.x);
- dsi.update_region.y = min(y, dsi.update_region.y);
- dsi.update_region.w = max(w, dsi.update_region.w);
- dsi.update_region.h = max(h, dsi.update_region.h);
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
+ spin_lock(&(p_dsi->update_lock));
+ if (p_dsi->update_region.dirty) {
+ p_dsi->update_region.x = min(x, p_dsi->update_region.x);
+ p_dsi->update_region.y = min(y, p_dsi->update_region.y);
+ p_dsi->update_region.w = max(w, p_dsi->update_region.w);
+ p_dsi->update_region.h = max(h, p_dsi->update_region.h);
} else {
- dsi.update_region.x = x;
- dsi.update_region.y = y;
- dsi.update_region.w = w;
- dsi.update_region.h = h;
+ p_dsi->update_region.x = x;
+ p_dsi->update_region.y = y;
+ p_dsi->update_region.w = w;
+ p_dsi->update_region.h = h;
}
- dsi.update_region.device = dssdev;
- dsi.update_region.dirty = true;
+ p_dsi->update_region.device = dssdev;
+ p_dsi->update_region.dirty = true;
- spin_unlock(&dsi.update_lock);
+ spin_unlock(&(p_dsi->update_lock));
}
-static int dsi_set_update_mode(struct omap_dss_device *dssdev,
+static int dsi_set_update_mode(enum dsi lcd_ix, struct omap_dss_device *dssdev,
enum omap_dss_update_mode mode)
{
int r = 0;
int i;
+ struct dsi_struct *p_dsi;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+ WARN_ON(!mutex_is_locked(&(p_dsi->bus_lock)));
- if (dsi.update_mode != mode) {
- dsi.update_mode = mode;
+ if (p_dsi->update_mode != mode) {
+ p_dsi->update_mode = mode;
- /* Mark the overlays dirty, and do apply(), so that we get the
- * overlays configured properly after update mode change. */
+ /* sv: for HS mode, set the GFX threshold properly before apply.
+ * Mark the overlays dirty, and do apply(), so that we get the
+ * overlays configured properly after update mode change.
+ */
for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
- if (ovl->manager == dssdev->manager)
+ if (ovl != NULL && ovl->manager == dssdev->manager)
ovl->info_dirty = true;
}
@@ -2793,17 +3114,18 @@ static int dsi_set_update_mode(struct omap_dss_device *dssdev,
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
mode == OMAP_DSS_UPDATE_AUTO) {
+
u16 w, h;
DSSDBG("starting auto update\n");
dssdev->get_resolution(dssdev, &w, &h);
- dsi_set_update_region(dssdev, 0, 0, w, h);
+ dsi_set_update_region(lcd_ix, dssdev, 0, 0, w, h);
- dsi_perf_mark_start_auto();
+ dsi_perf_mark_start_auto(lcd_ix);
- wake_up(&dsi.waitqueue);
+ wake_up(&(p_dsi->waitqueue));
}
}
@@ -2820,20 +3142,23 @@ static int dsi_set_te(struct omap_dss_device *dssdev, bool enable)
return r;
}
-static void dsi_handle_framedone(void)
+static void dsi_handle_framedone(enum dsi lcd_ix)
{
int r;
const int channel = 0;
bool use_te_trigger;
+ struct dsi_struct *p_dsi;
- use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
+ use_te_trigger = p_dsi->te_enabled && !p_dsi->use_ext_te;
+
+ if (p_dsi->update_mode != OMAP_DSS_UPDATE_AUTO)
DSSDBG("FRAMEDONE\n");
if (use_te_trigger) {
/* enable LP_RX_TO again after the TE */
- REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
+ REG_FLD_MOD(lcd_ix, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
/* Send BTA after the frame. We need this for the TE to work, as TE
@@ -2845,18 +3170,18 @@ static void dsi_handle_framedone(void)
* make sure that the transfer has been completed. It would be more
* optimal, but more complex, to wait only just before starting next
* transfer. */
- r = dsi_vc_send_bta_sync(channel);
+ r = dsi_vc_send_bta_sync(lcd_ix, channel);
if (r)
DSSERR("BTA after framedone failed\n");
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
- DSSERR("Received error during frame transfer:\n");
- dsi_vc_flush_receive_data(0);
+ if (REG_GET(lcd_ix, DSI_VC_CTRL(channel), 20, 20)) {
+ /*DSSERR("Received error during frame transfer:\n");*/
+ dsi_vc_flush_receive_data(lcd_ix, 0);
}
#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
- dispc_fake_vsync_irq();
+ dispc_fake_vsync_irq(lcd_ix);
#endif
}
@@ -2865,104 +3190,111 @@ static int dsi_update_thread(void *data)
unsigned long timeout;
struct omap_dss_device *device;
u16 x, y, w, h;
+ enum dsi lcd_ix = dsi1;
while (1) {
bool sched;
- wait_event_interruptible(dsi.waitqueue,
- dsi.update_mode == OMAP_DSS_UPDATE_AUTO ||
- (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
- dsi.update_region.dirty == true) ||
+ wait_event_interruptible(dsi_1.waitqueue,
+ dsi_1.update_mode == OMAP_DSS_UPDATE_AUTO ||
+ (dsi_1.update_mode == OMAP_DSS_UPDATE_MANUAL &&
+ dsi_1.update_region.dirty == true) ||
kthread_should_stop());
if (kthread_should_stop())
break;
- dsi_bus_lock();
+ dsi_bus_lock(lcd_ix);
- if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED ||
- kthread_should_stop()) {
- dsi_bus_unlock();
+ if (dsi_1.update_mode == OMAP_DSS_UPDATE_DISABLED ||
+ kthread_should_stop()) {
+ dsi_bus_unlock(lcd_ix);
break;
}
- dsi_perf_mark_setup();
+ dsi_perf_mark_setup(lcd_ix);
- if (dsi.update_region.dirty) {
- spin_lock(&dsi.update_lock);
- dsi.active_update_region = dsi.update_region;
- dsi.update_region.dirty = false;
- spin_unlock(&dsi.update_lock);
+ if (dsi_1.update_region.dirty) {
+ spin_lock(&dsi_1.update_lock);
+ dsi_1.active_update_region = dsi_1.update_region;
+ dsi_1.update_region.dirty = false;
+ spin_unlock(&dsi_1.update_lock);
}
- device = dsi.active_update_region.device;
- x = dsi.active_update_region.x;
- y = dsi.active_update_region.y;
- w = dsi.active_update_region.w;
- h = dsi.active_update_region.h;
+ device = dsi_1.active_update_region.device;
+ x = dsi_1.active_update_region.x;
+ y = dsi_1.active_update_region.y;
+ w = dsi_1.active_update_region.w;
+ h = dsi_1.active_update_region.h;
if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
+ if (dsi_1.update_mode == OMAP_DSS_UPDATE_MANUAL)
dss_setup_partial_planes(device,
&x, &y, &w, &h);
- dispc_set_lcd_size(w, h);
+ dispc_set_lcd_size(OMAP_DSS_CHANNEL_LCD, w, h);
+ /* TODO: Correct this while adding support for LCD2 */
}
- if (dsi.active_update_region.dirty) {
- dsi.active_update_region.dirty = false;
+ if (dsi_1.active_update_region.dirty) {
+ dsi_1.active_update_region.dirty = false;
/* XXX TODO we don't need to send the coords, if they
* are the same that are already programmed to the
* panel. That should speed up manual update a bit */
device->driver->setup_update(device, x, y, w, h);
}
- dsi_perf_mark_start();
+ dsi_perf_mark_start(lcd_ix);
if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- dsi_vc_config_vp(0);
+ /* We do not need to configure the channel to have
+ * source as VP since it is already configured as VP
+ */
+ if (cpu_is_omap34xx())
+ dsi_vc_config_vp(lcd_ix, 0);
- if (dsi.te_enabled && dsi.use_ext_te)
+ if (dsi_1.te_enabled && dsi_1.use_ext_te)
device->driver->wait_for_te(device);
- dsi.framedone_received = false;
+ dsi_1.framedone_received = false;
- dsi_update_screen_dispc(device, x, y, w, h);
+ dsi_update_screen_dispc(lcd_ix, device, x, y, w, h);
/* wait for framedone */
timeout = msecs_to_jiffies(1000);
- wait_event_timeout(dsi.waitqueue,
- dsi.framedone_received == true,
+ timeout = wait_event_timeout(dsi_1.waitqueue,
+ dsi_1.framedone_received == true,
timeout);
- if (!dsi.framedone_received) {
+ if (timeout == 0) {
DSSERR("framedone timeout\n");
DSSERR("failed update %d,%d %dx%d\n",
x, y, w, h);
dispc_enable_sidle();
- dispc_enable_lcd_out(0);
- dsi_reset_tx_fifo(0);
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 0);
+
+ dsi_reset_tx_fifo(lcd_ix, 0);
} else {
- dsi_handle_framedone();
- dsi_perf_show("DISPC");
+ dsi_handle_framedone(lcd_ix);
+ dsi_perf_show(lcd_ix, "DISPC");
}
} else {
- dsi_update_screen_l4(device, x, y, w, h);
- dsi_perf_show("L4");
+ dsi_update_screen_l4(lcd_ix, device, x, y, w, h);
+ dsi_perf_show(lcd_ix, "L4");
}
- sched = atomic_read(&dsi.bus_lock.count) < 0;
+ sched = atomic_read(&dsi_1.bus_lock.count) < 0;
- complete_all(&dsi.update_completion);
+ complete_all(&dsi_1.update_completion);
- dsi_bus_unlock();
+ dsi_bus_unlock(lcd_ix);
/* XXX We need to give others chance to get the bus lock. Is
* there a better way for this? */
- if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO && sched)
+ if (dsi_1.update_mode == OMAP_DSS_UPDATE_AUTO && sched)
schedule_timeout_interruptible(1);
}
@@ -2971,6 +3303,136 @@ static int dsi_update_thread(void *data)
return 0;
}
+static int dsi2_update_thread(void *data)
+ {
+ unsigned long timeout;
+ struct omap_dss_device *device;
+ u16 x, y, w, h;
+ enum dsi lcd_ix = dsi2;
+
+ while (1) {
+ bool sched;
+
+ wait_event_interruptible(dsi_2.waitqueue,
+ dsi_2.update_mode == OMAP_DSS_UPDATE_AUTO ||
+ (dsi_2.update_mode == OMAP_DSS_UPDATE_MANUAL &&
+ dsi_2.update_region.dirty == true) ||
+ kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ dsi_bus_lock(lcd_ix);
+
+ if (dsi_2.update_mode == OMAP_DSS_UPDATE_DISABLED ||
+ kthread_should_stop()) {
+ dsi_bus_unlock(lcd_ix);
+ break;
+ }
+
+ dsi_perf_mark_setup(lcd_ix);
+
+ if (dsi_2.update_region.dirty) {
+ spin_lock(&dsi_2.update_lock);
+ dsi_2.active_update_region = dsi_2.update_region;
+ dsi_2.update_region.dirty = false;
+ spin_unlock(&dsi_2.update_lock);
+ }
+
+ device = dsi_2.active_update_region.device;
+ x = dsi_2.active_update_region.x;
+ y = dsi_2.active_update_region.y;
+ w = dsi_2.active_update_region.w;
+ h = dsi_2.active_update_region.h;
+
+ if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+
+ if (dsi_2.update_mode == OMAP_DSS_UPDATE_MANUAL) {
+ dss_setup_partial_planes(device,
+ &x, &y, &w, &h);
+
+ /* XXX there seems to be a bug in this driver
+ * or OMAP hardware. Some updates with certain
+ * widths and x coordinates fail. These widths
+ * are always odd, so "fix" it here for now */
+ if (w & 1) {
+ u16 dw, dh;
+ device->get_resolution(device,
+ &dw, &dh);
+ if (x + w == dw)
+ x &= ~1;
+ ++w;
+ dss_setup_partial_planes(device,
+ &x, &y, &w, &h);
+ }
+
+ }
+
+ dispc_set_lcd_size(OMAP_DSS_CHANNEL_LCD2, w, h);
+ /* TODO: Correct this while adding support for LCD2 */
+ }
+
+ if (dsi_2.active_update_region.dirty) {
+ dsi_2.active_update_region.dirty = false;
+ /* XXX TODO we don't need to send the coords, if they
+ * are the same that are already programmed to the
+ * panel. That should speed up manual update a bit */
+ device->driver->setup_update(device, x, y, w, h);
+ }
+
+ dsi_perf_mark_start(lcd_ix);
+
+ if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+ /* We do not need to configure the channel to have
+ * source as VP since it is already configured as VP
+ */
+ if (cpu_is_omap34xx())
+ dsi_vc_config_vp(lcd_ix, 0);
+
+ if (dsi_2.te_enabled && dsi_2.use_ext_te)
+ device->driver->wait_for_te(device);
+
+ dsi_2.framedone_received = false;
+ dsi_update_screen_dispc(lcd_ix, device, x, y, w, h);
+
+ /* wait for framedone */
+ timeout = msecs_to_jiffies(1000);
+ timeout = wait_event_timeout(dsi_2.waitqueue,
+ dsi_2.framedone_received == true,
+ timeout);
+
+ if (timeout == 0) {
+ DSSERR("framedone2 timeout\n");
+ DSSERR("failed update %d,%d %dx%d\n",
+ x, y, w, h);
+ dispc_enable_sidle();
+
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD2, 0);
+ } else {
+ dsi_handle_framedone(lcd_ix);
+ dsi_perf_show(lcd_ix, "DISPC");
+ }
+ } else {
+ dsi_update_screen_l4(lcd_ix, device, x, y, w, h);
+ dsi_perf_show(lcd_ix, "L4");
+ }
+
+ sched = atomic_read(&dsi_2.bus_lock.count) < 0;
+
+ complete_all(&dsi_2.update_completion);
+
+ dsi_bus_unlock(lcd_ix);
+
+ /* XXX We need to give others chance to get the bus lock. Is
+ * there a better way for this? */
+ if (dsi_2.update_mode == OMAP_DSS_UPDATE_AUTO && sched)
+ schedule_timeout_interruptible(1);
+ }
+
+ DSSDBG("update thread exiting\n");
+
+ return 0;
+ }
/* Display funcs */
@@ -2978,59 +3440,92 @@ static int dsi_update_thread(void *data)
static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
{
int r;
+ u32 irq;
+ struct omap_video_timings timings = {
+ .hsw = 4+1,
+ .hfp = 4+1,
+ .hbp = 4+1,
+ .vsw = 0+1, /* dispc subtracts 1 before writing this to the register */
+ .vfp = 0,
+ .vbp = 1,
+ .x_res = 864,
+ .y_res = 480,
+ };
+
+ irq = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_FRAMEDONE :
+ DISPC_IRQ_FRAMEDONE2;
+
+ r = omap_dispc_register_isr((dssdev->channel == OMAP_DSS_CHANNEL_LCD) ?
+ dsi_framedone_irq_callback : dsi2_framedone_irq_callback,
+ NULL, irq);
- r = omap_dispc_register_isr(dsi_framedone_irq_callback, NULL,
- DISPC_IRQ_FRAMEDONE);
if (r) {
DSSERR("can't get FRAMEDONE irq\n");
return r;
}
- dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+ dispc_set_lcd_display_type(dssdev->channel, OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI);
+ dispc_set_parallel_interface_mode(dssdev->channel,
+ OMAP_DSS_PARALLELMODE_DSI);
dispc_enable_fifohandcheck(1);
- dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+ dispc_set_tft_data_lines(dssdev->channel, dssdev->ctrl.pixel_size);
+ dispc_set_lcd_timings(dssdev->channel, &timings);
- {
- struct omap_video_timings timings = {
- .hsw = 1,
- .hfp = 1,
- .hbp = 1,
- .vsw = 1,
- .vfp = 0,
- .vbp = 0,
- };
-
- dispc_set_lcd_timings(&timings);
- }
+ dispc_set_pol_freq(dssdev->channel, dssdev->panel.config,
+ dssdev->panel.acbi, dssdev->panel.acb);
return 0;
}
static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
{
- omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL,
+ enum dsi lcd_ix;
+ int i;
+
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+
+ if (lcd_ix == dsi1) {
+ i = 0;
+ omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL,
DISPC_IRQ_FRAMEDONE);
+ } else {
+ i = 1;
+ omap_dispc_unregister_isr(dsi2_framedone_irq_callback, NULL,
+ DISPC_IRQ_FRAMEDONE2);
+ }
}
static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
{
struct dsi_clock_info cinfo;
int r;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
+ /* We use DSS2_FCK as input clock for OMAP3
+ * and DSS_ALWON_SYS_CLK for OMAP4*/
+
+ if (cpu_is_omap44xx())
+ cinfo.use_dss2_sys_clk = true;
+ else
+ cinfo.use_dss2_fck = true;
- /* we always use DSS2_FCK as input clock */
- cinfo.use_dss2_fck = true;
cinfo.regn = dssdev->phy.dsi.div.regn;
cinfo.regm = dssdev->phy.dsi.div.regm;
cinfo.regm3 = dssdev->phy.dsi.div.regm3;
cinfo.regm4 = dssdev->phy.dsi.div.regm4;
+
r = dsi_calc_clock_rates(&cinfo);
if (r)
return r;
- r = dsi_pll_set_clock_div(&cinfo);
+ r = dsi_pll_set_clock_div(lcd_ix, &cinfo);
if (r) {
DSSERR("Failed to set dsi clocks\n");
return r;
@@ -3044,8 +3539,10 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
struct dispc_clock_info dispc_cinfo;
int r;
unsigned long long fck;
+ enum dsi lcd_ix;
- fck = dsi_get_dsi1_pll_rate();
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ fck = dsi_get_dsi1_pll_rate(lcd_ix);
dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div;
dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div;
@@ -3056,7 +3553,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
return r;
}
- r = dispc_set_clock_div(&dispc_cinfo);
+ r = dispc_set_clock_div(dssdev->channel, &dispc_cinfo);
if (r) {
DSSERR("Failed to set dispc clocks\n");
return r;
@@ -3067,11 +3564,16 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
{
- int r;
+ int r, l = 0;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
- _dsi_print_reset_status();
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
- r = dsi_pll_init(dssdev, true, true);
+ _dsi_print_reset_status(lcd_ix);
+
+ r = dsi_pll_init(lcd_ix, dssdev, true, true);
if (r)
goto err0;
@@ -3079,7 +3581,7 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
if (r)
goto err1;
- dss_select_clk_source(true, true);
+ dss_select_clk_source_dsi(lcd_ix, true, true);
DSSDBG("PLL OK\n");
@@ -3087,26 +3589,48 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
if (r)
goto err2;
- r = dsi_complexio_init(dssdev);
+ r = dsi_complexio_init(lcd_ix, dssdev);
if (r)
goto err2;
- _dsi_print_reset_status();
+ _dsi_print_reset_status(lcd_ix);
dsi_proto_timings(dssdev);
dsi_set_lp_clk_divisor(dssdev);
if (1)
- _dsi_print_reset_status();
+ _dsi_print_reset_status(lcd_ix);
r = dsi_proto_config(dssdev);
if (r)
goto err3;
/* enable interface */
- dsi_vc_enable(0, 1);
- dsi_if_enable(1);
- dsi_force_tx_stop_mode_io();
+ dsi_vc_enable(lcd_ix, 0, 1);
+ dsi_vc_enable(lcd_ix, 1, 1);
+
+ dsi_if_enable(lcd_ix, 1);
+ dsi_force_tx_stop_mode_io(lcd_ix);
+
+ /* magic OMAP4 registers */
+ dsi_write_reg(lcd_ix, DSI_DSIPHY_CFG12, 0x58);
+
+ l = dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG14);
+ l = FLD_MOD(l, 1, 31, 31);
+ l = FLD_MOD(l, 0x54, 30, 23);
+ l = FLD_MOD(l, 1, 19, 19);
+ l = FLD_MOD(l, 1, 18, 18);
+ l = FLD_MOD(l, 7, 17, 14);
+ l = FLD_MOD(l, 1, 11, 11);
+ dsi_write_reg(lcd_ix, DSI_DSIPHY_CFG14, l);
+
+ l = 0;
+ l = dsi_read_reg(lcd_ix, DSI_DSIPHY_CFG8);
+ l = FLD_MOD(l, 1, 11, 11);
+ l = FLD_MOD(l, 0x10, 10, 6);
+ l = FLD_MOD(l, 1, 5, 5);
+ l = FLD_MOD(l, 0xE, 3, 0);
+ dsi_write_reg(lcd_ix, DSI_DSIPHY_CFG8, l);
if (dssdev->driver->enable) {
r = dssdev->driver->enable(dssdev);
@@ -3114,56 +3638,72 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
goto err4;
}
- /* enable high-speed after initial config */
- dsi_vc_enable_hs(0, 1);
+ if (cpu_is_omap34xx()) {
+ /* enable high-speed after initial config */
+ dsi_vc_enable_hs(lcd_ix, 0, 1);
+ }
return 0;
err4:
- dsi_if_enable(0);
+ dsi_if_enable(lcd_ix, 0);
err3:
- dsi_complexio_uninit();
+ dsi_complexio_uninit(lcd_ix);
err2:
- dss_select_clk_source(false, false);
+ dss_select_clk_source_dsi(lcd_ix, false, false);
err1:
- dsi_pll_uninit();
+ dsi_pll_uninit(lcd_ix);
err0:
return r;
}
static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
{
+ enum dsi lcd_ix;
+
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+
if (dssdev->driver->disable)
dssdev->driver->disable(dssdev);
- dss_select_clk_source(false, false);
- dsi_complexio_uninit();
- dsi_pll_uninit();
+ dss_select_clk_source_dsi(lcd_ix, false, false);
+ dsi_complexio_uninit(lcd_ix);
+ dsi_pll_uninit(lcd_ix);
}
-static int dsi_core_init(void)
+static int dsi_core_init(enum dsi lcd_ix)
{
- /* Autoidle */
- REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0);
+ /* AUTOIDLE off */
+ REG_FLD_MOD(lcd_ix, DSI_SYSCONFIG, 0, 0, 0);
/* ENWAKEUP */
- REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2);
+ REG_FLD_MOD(lcd_ix, DSI_SYSCONFIG, 1, 2, 2);
/* SIDLEMODE smart-idle */
- REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3);
+ REG_FLD_MOD(lcd_ix, DSI_SYSCONFIG, 2, 4, 3);
- _dsi_initialize_irq();
+ _dsi_initialize_irq(lcd_ix);
return 0;
}
+#define GPIO_OE 0x134
+#define GPIO_DATAOUT 0x13C
+#define OMAP24XX_GPIO_CLEARDATAOUT 0x190
+#define OMAP24XX_GPIO_SETDATAOUT 0x194
+
static int dsi_display_enable(struct omap_dss_device *dssdev)
{
int r = 0;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
DSSDBG("dsi_display_enable\n");
- mutex_lock(&dsi.lock);
- dsi_bus_lock();
+ mutex_lock(&(p_dsi->lock));
+ dsi_bus_lock(lcd_ix);
r = omap_dss_start_device(dssdev);
if (r) {
@@ -3178,14 +3718,19 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
}
enable_clocks(1);
- dsi_enable_pll_clock(1);
+ dsi_enable_pll_clock(lcd_ix, 1);
+
+ dsi_core_init(lcd_ix);
+
+ if (cpu_is_omap44xx())
+ /* DSS_PWR_DSS_DSS_CTRL */
+ omap_writel(0x00030007, 0x4A307100);
+
+ r = _dsi_reset(lcd_ix);
- r = _dsi_reset();
if (r)
goto err2;
- dsi_core_init();
-
r = dsi_display_init_dispc(dssdev);
if (r)
goto err2;
@@ -3196,47 +3741,61 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- dsi.use_ext_te = dssdev->phy.dsi.ext_te;
- r = dsi_set_te(dssdev, dsi.te_enabled);
+ p_dsi->use_ext_te = dssdev->phy.dsi.ext_te;
+
+ r = dsi_set_te(dssdev, p_dsi->te_enabled);
+
if (r)
goto err4;
- dsi_set_update_mode(dssdev, dsi.user_update_mode);
+ if (cpu_is_omap44xx()) {
+ if (lcd_ix == dsi2)
+ p_dsi->update_mode = OMAP_DSS_UPDATE_AUTO;
+ }
+ p_dsi->update_mode = OMAP_DSS_UPDATE_DISABLED;
+ p_dsi->user_update_mode = OMAP_DSS_UPDATE_AUTO;
+
+ dsi_set_update_mode(lcd_ix, dssdev, p_dsi->user_update_mode);
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
+ dsi_bus_unlock(lcd_ix);
+ mutex_unlock(&(p_dsi->lock));
return 0;
err4:
-
dsi_display_uninit_dsi(dssdev);
err3:
dsi_display_uninit_dispc(dssdev);
err2:
enable_clocks(0);
- dsi_enable_pll_clock(0);
+ dsi_enable_pll_clock(lcd_ix, 0);
err1:
omap_dss_stop_device(dssdev);
err0:
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
+ dsi_bus_unlock(lcd_ix);
+ mutex_unlock(&(p_dsi->lock));
DSSDBG("dsi_display_enable FAILED\n");
return r;
}
static void dsi_display_disable(struct omap_dss_device *dssdev)
{
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
DSSDBG("dsi_display_disable\n");
- mutex_lock(&dsi.lock);
- dsi_bus_lock();
+ mutex_lock(&(p_dsi->lock));
+ dsi_bus_lock(lcd_ix);
if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
goto end;
- dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
+ p_dsi->update_mode = OMAP_DSS_UPDATE_DISABLED;
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
dsi_display_uninit_dispc(dssdev);
@@ -3244,26 +3803,32 @@ static void dsi_display_disable(struct omap_dss_device *dssdev)
dsi_display_uninit_dsi(dssdev);
enable_clocks(0);
- dsi_enable_pll_clock(0);
+ dsi_enable_pll_clock(lcd_ix, 0);
omap_dss_stop_device(dssdev);
end:
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
+ dsi_bus_unlock(lcd_ix);
+ mutex_unlock(&(p_dsi->lock));
}
static int dsi_display_suspend(struct omap_dss_device *dssdev)
{
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
DSSDBG("dsi_display_suspend\n");
- mutex_lock(&dsi.lock);
- dsi_bus_lock();
+ mutex_lock(&(p_dsi->lock));
+ dsi_bus_lock(lcd_ix);
if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
goto end;
- dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
+ p_dsi->update_mode = OMAP_DSS_UPDATE_DISABLED;
dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
dsi_display_uninit_dispc(dssdev);
@@ -3271,10 +3836,10 @@ static int dsi_display_suspend(struct omap_dss_device *dssdev)
dsi_display_uninit_dsi(dssdev);
enable_clocks(0);
- dsi_enable_pll_clock(0);
+ dsi_enable_pll_clock(lcd_ix, 0);
end:
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
+ dsi_bus_unlock(lcd_ix);
+ mutex_unlock(&(p_dsi->lock));
return 0;
}
@@ -3282,11 +3847,16 @@ end:
static int dsi_display_resume(struct omap_dss_device *dssdev)
{
int r;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
DSSDBG("dsi_display_resume\n");
- mutex_lock(&dsi.lock);
- dsi_bus_lock();
+ mutex_lock(&(p_dsi->lock));
+ dsi_bus_lock(lcd_ix);
if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
DSSERR("dssdev not suspended\n");
@@ -3295,13 +3865,13 @@ static int dsi_display_resume(struct omap_dss_device *dssdev)
}
enable_clocks(1);
- dsi_enable_pll_clock(1);
+ dsi_enable_pll_clock(lcd_ix, 1);
- r = _dsi_reset();
+ r = _dsi_reset(lcd_ix);
if (r)
goto err1;
- dsi_core_init();
+ dsi_core_init(lcd_ix);
r = dsi_display_init_dispc(dssdev);
if (r)
@@ -3313,14 +3883,14 @@ static int dsi_display_resume(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- r = dsi_set_te(dssdev, dsi.te_enabled);
+ r = dsi_set_te(dssdev, p_dsi->te_enabled);
if (r)
goto err2;
- dsi_set_update_mode(dssdev, dsi.user_update_mode);
+ dsi_set_update_mode(lcd_ix, dssdev, p_dsi->user_update_mode);
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
+ dsi_bus_unlock(lcd_ix);
+ mutex_unlock(&(p_dsi->lock));
return 0;
@@ -3328,25 +3898,33 @@ err2:
dsi_display_uninit_dispc(dssdev);
err1:
enable_clocks(0);
- dsi_enable_pll_clock(0);
+ dsi_enable_pll_clock(lcd_ix, 0);
err0:
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
+ dsi_bus_unlock(lcd_ix);
+ mutex_unlock(&(p_dsi->lock));
DSSDBG("dsi_display_resume FAILED\n");
return r;
+
}
static int dsi_display_update(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
+ u16 x, u16 y, u16 w, u16 h)
{
int r = 0;
u16 dw, dh;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
+ printk(KERN_DEBUG "dsi_display_update lcd_ix=%d\n", lcd_ix);
DSSDBG("dsi_display_update(%d,%d %dx%d)\n", x, y, w, h);
- mutex_lock(&dsi.lock);
+ mutex_lock(&(p_dsi->lock));
- if (dsi.update_mode != OMAP_DSS_UPDATE_MANUAL)
+ if (p_dsi->update_mode != OMAP_DSS_UPDATE_MANUAL)
goto end;
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
@@ -3371,12 +3949,12 @@ static int dsi_display_update(struct omap_dss_device *dssdev,
goto end;
}
- dsi_set_update_region(dssdev, x, y, w, h);
+ dsi_set_update_region(lcd_ix, dssdev, x, y, w, h);
- wake_up(&dsi.waitqueue);
+ wake_up(&(p_dsi->waitqueue));
end:
- mutex_unlock(&dsi.lock);
+ mutex_unlock(&(p_dsi->lock));
return r;
}
@@ -3384,25 +3962,29 @@ end:
static int dsi_display_sync(struct omap_dss_device *dssdev)
{
bool wait;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
DSSDBG("dsi_display_sync()\n");
- mutex_lock(&dsi.lock);
- dsi_bus_lock();
+ mutex_lock(&(p_dsi->lock));
+ dsi_bus_lock(lcd_ix);
- if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
- dsi.update_region.dirty) {
- INIT_COMPLETION(dsi.update_completion);
+ if (p_dsi->update_mode == OMAP_DSS_UPDATE_MANUAL &&
+ p_dsi->update_region.dirty) {
+ INIT_COMPLETION(p_dsi->update_completion);
wait = true;
} else {
wait = false;
}
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
+ dsi_bus_unlock(lcd_ix);
+ mutex_unlock(&(p_dsi->lock));
if (wait)
- wait_for_completion_interruptible(&dsi.update_completion);
+ wait_for_completion_interruptible(&(p_dsi->update_completion));
DSSDBG("dsi_display_sync() done\n");
return 0;
@@ -3412,17 +3994,23 @@ static int dsi_display_set_update_mode(struct omap_dss_device *dssdev,
enum omap_dss_update_mode mode)
{
int r = 0;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
DSSDBGF("%d", mode);
- mutex_lock(&dsi.lock);
- dsi_bus_lock();
-
- dsi.user_update_mode = mode;
- r = dsi_set_update_mode(dssdev, mode);
+ mutex_lock(&(p_dsi->lock));
+ dsi_bus_lock(lcd_ix);
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
+ if (p_dsi->update_mode != mode) {
+ p_dsi->user_update_mode = mode;
+ p_dsi->update_mode = mode;
+ r = dsi_set_update_mode(lcd_ix, dssdev, mode);
+ }
+ dsi_bus_unlock(lcd_ix);
+ mutex_unlock(&(p_dsi->lock));
return r;
}
@@ -3430,56 +4018,73 @@ static int dsi_display_set_update_mode(struct omap_dss_device *dssdev,
static enum omap_dss_update_mode dsi_display_get_update_mode(
struct omap_dss_device *dssdev)
{
- return dsi.update_mode;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+ return p_dsi->update_mode;
}
static int dsi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
{
int r = 0;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
DSSDBGF("%d", enable);
if (!dssdev->driver->enable_te)
return -ENOENT;
- dsi_bus_lock();
+ dsi_bus_lock(lcd_ix);
- dsi.te_enabled = enable;
+ p_dsi->te_enabled = enable;
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
goto end;
r = dsi_set_te(dssdev, enable);
end:
- dsi_bus_unlock();
+ dsi_bus_unlock(lcd_ix);
return r;
}
static int dsi_display_get_te(struct omap_dss_device *dssdev)
{
- return dsi.te_enabled;
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
+ return p_dsi->te_enabled;
}
static int dsi_display_set_rotate(struct omap_dss_device *dssdev, u8 rotate)
{
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
DSSDBGF("%d", rotate);
if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
return -EINVAL;
- dsi_bus_lock();
+ dsi_bus_lock(lcd_ix);
dssdev->driver->set_rotate(dssdev, rotate);
- if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
+ if (p_dsi->update_mode == OMAP_DSS_UPDATE_AUTO) {
u16 w, h;
/* the display dimensions may have changed, so set a new
* update region */
dssdev->get_resolution(dssdev, &w, &h);
- dsi_set_update_region(dssdev, 0, 0, w, h);
+ dsi_set_update_region(lcd_ix, dssdev, 0, 0, w, h);
}
- dsi_bus_unlock();
+ dsi_bus_unlock(lcd_ix);
return 0;
}
@@ -3494,14 +4099,16 @@ static u8 dsi_display_get_rotate(struct omap_dss_device *dssdev)
static int dsi_display_set_mirror(struct omap_dss_device *dssdev, bool mirror)
{
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
DSSDBGF("%d", mirror);
if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
return -EINVAL;
- dsi_bus_lock();
+ dsi_bus_lock(lcd_ix);
dssdev->driver->set_mirror(dssdev, mirror);
- dsi_bus_unlock();
+ dsi_bus_unlock(lcd_ix);
return 0;
}
@@ -3516,17 +4123,18 @@ static bool dsi_display_get_mirror(struct omap_dss_device *dssdev)
static int dsi_display_run_test(struct omap_dss_device *dssdev, int test_num)
{
- int r;
-
+ int r = 0;
+ enum dsi lcd_ix;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return -EIO;
DSSDBGF("%d", test_num);
- dsi_bus_lock();
+ dsi_bus_lock(lcd_ix);
/* run test first in low speed mode */
- dsi_vc_enable_hs(0, 0);
+ dsi_vc_enable_hs(lcd_ix, 0, 0);
if (dssdev->driver->run_test) {
r = dssdev->driver->run_test(dssdev, test_num);
@@ -3535,7 +4143,7 @@ static int dsi_display_run_test(struct omap_dss_device *dssdev, int test_num)
}
/* then in high speed */
- dsi_vc_enable_hs(0, 1);
+ dsi_vc_enable_hs(lcd_ix, 0, 1);
if (dssdev->driver->run_test) {
r = dssdev->driver->run_test(dssdev, test_num);
@@ -3544,9 +4152,9 @@ static int dsi_display_run_test(struct omap_dss_device *dssdev, int test_num)
}
end:
- dsi_vc_enable_hs(0, 1);
+ dsi_vc_enable_hs(lcd_ix, 0, 1);
- dsi_bus_unlock();
+ dsi_bus_unlock(lcd_ix);
return r;
}
@@ -3556,7 +4164,10 @@ static int dsi_display_memory_read(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
int r;
-
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
DSSDBGF("");
if (!dssdev->driver->memory_read)
@@ -3565,16 +4176,16 @@ static int dsi_display_memory_read(struct omap_dss_device *dssdev,
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return -EIO;
- dsi_bus_lock();
+ dsi_bus_lock(lcd_ix);
r = dssdev->driver->memory_read(dssdev, buf, size,
x, y, w, h);
/* Memory read usually changes the update area. This will
* force the next update to re-set the update area */
- dsi.active_update_region.dirty = true;
+ p_dsi->active_update_region.dirty = true;
- dsi_bus_unlock();
+ dsi_bus_unlock(lcd_ix);
return r;
}
@@ -3584,16 +4195,26 @@ void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
u32 *fifo_low, u32 *fifo_high)
{
unsigned burst_size_bytes;
-
- *burst_size = OMAP_DSS_BURST_16x32;
- burst_size_bytes = 16 * 32 / 8;
-
- *fifo_high = fifo_size - burst_size_bytes;
- *fifo_low = fifo_size - burst_size_bytes * 8;
+ if (cpu_is_omap34xx()) {
+ *burst_size = OMAP_DSS_BURST_16x32;
+ burst_size_bytes = 16 * 32 / 8;
+ *fifo_high = fifo_size - burst_size_bytes;
+ *fifo_low = fifo_size - burst_size_bytes * 8;
+ } else {
+ *burst_size = OMAP_DSS_BURST_4x32; /* OMAP4: same as 2x128 */
+ burst_size_bytes = 2 * 128 / 8;
+ *fifo_high = 1020; /* check SV comment */
+ *fifo_low = 956;
+ }
}
int dsi_init_display(struct omap_dss_device *dssdev)
{
+ enum dsi lcd_ix;
+ struct dsi_struct *p_dsi;
+ lcd_ix = (dssdev->channel == OMAP_DSS_CHANNEL_LCD) ? dsi1 : dsi2;
+ p_dsi = (lcd_ix == dsi1) ? &dsi_1 : &dsi_2;
+
DSSDBG("DSI init\n");
dssdev->enable = dsi_display_enable;
@@ -3620,91 +4241,190 @@ int dsi_init_display(struct omap_dss_device *dssdev)
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
- dsi.vc[0].dssdev = dssdev;
- dsi.vc[1].dssdev = dssdev;
+ p_dsi->vc[0].dssdev = dssdev;
+ p_dsi->vc[1].dssdev = dssdev;
return 0;
}
int dsi_init(struct platform_device *pdev)
{
+ int ret;
u32 rev;
int r;
+ enum dsi lcd_ix = dsi1;
+
struct sched_param param = {
.sched_priority = MAX_USER_RT_PRIO-1
};
- spin_lock_init(&dsi.errors_lock);
- dsi.errors = 0;
+ spin_lock_init(&dsi_1.errors_lock);
+ dsi_1.errors = 0;
+
+ /* XXX fail properly */
- init_completion(&dsi.bta_completion);
- init_completion(&dsi.update_completion);
+ init_completion(&dsi_1.bta_completion);
+ init_completion(&dsi_1.update_completion);
- dsi.thread = kthread_create(dsi_update_thread, NULL, "dsi");
- if (IS_ERR(dsi.thread)) {
+ dsi_1.thread = kthread_create(dsi_update_thread, NULL, "dsi");
+ if (IS_ERR(dsi_1.thread)) {
DSSERR("cannot create kthread\n");
- r = PTR_ERR(dsi.thread);
+ r = PTR_ERR(dsi_1.thread);
goto err0;
}
- sched_setscheduler(dsi.thread, SCHED_FIFO, &param);
+ sched_setscheduler(dsi_1.thread, SCHED_FIFO, &param);
- init_waitqueue_head(&dsi.waitqueue);
- spin_lock_init(&dsi.update_lock);
+ init_waitqueue_head(&dsi_1.waitqueue);
+ spin_lock_init(&dsi_1.update_lock);
- mutex_init(&dsi.lock);
- mutex_init(&dsi.bus_lock);
+ mutex_init(&dsi_1.lock);
+ mutex_init(&dsi_1.bus_lock);
#ifdef DSI_CATCH_MISSING_TE
- init_timer(&dsi.te_timer);
- dsi.te_timer.function = dsi_te_timeout;
- dsi.te_timer.data = 0;
+ init_timer(&dsi_1.te_timer);
+ dsi_1.te_timer.function = dsi_te_timeout;
+ dsi_1.te_timer.data = 0;
#endif
- dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
- dsi.user_update_mode = OMAP_DSS_UPDATE_DISABLED;
+ dsi_1.update_mode = OMAP_DSS_UPDATE_DISABLED;
+ dsi_1.user_update_mode = OMAP_DSS_UPDATE_DISABLED;
- dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
- if (!dsi.base) {
+ dsi_1.base = ioremap(DSI_BASE, 2000);
+ if (!dsi_1.base) {
DSSERR("can't ioremap DSI\n");
r = -ENOMEM;
goto err1;
}
- dsi.vdds_dsi_reg = regulator_get(&pdev->dev, "vdds_dsi");
- if (IS_ERR(dsi.vdds_dsi_reg)) {
- iounmap(dsi.base);
- DSSERR("can't get VDDS_DSI regulator\n");
- r = PTR_ERR(dsi.vdds_dsi_reg);
- goto err2;
+ r = request_irq(INT_44XX_DSS_DSI1_IRQ, dsi_irq_handler,
+ 0, "OMAP DSI", (void *)0);
+
+ if (cpu_is_omap34xx()) {
+ dsi_1.vdds_dsi_reg = regulator_get(&pdev->dev, "vdds_dsi");
+ if (IS_ERR(dsi_1.vdds_dsi_reg)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ r = PTR_ERR(dsi_1.vdds_dsi_reg);
+ goto err2;
+ }
+ } else {
+ ret = twl_i2c_write_u8(TWL_MODULE_PWM, 0xFF, PWM2ON);
+ ret = twl_i2c_write_u8(TWL_MODULE_PWM, 0x7F, PWM2OFF);
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x30, TOGGLE3);
}
enable_clocks(1);
- rev = dsi_read_reg(DSI_REVISION);
+ rev = dsi_read_reg(lcd_ix, DSI_REVISION);
printk(KERN_INFO "OMAP DSI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
enable_clocks(0);
- wake_up_process(dsi.thread);
+ wake_up_process(dsi_1.thread);
return 0;
err2:
- iounmap(dsi.base);
+ iounmap(dsi_1.base);
err1:
- kthread_stop(dsi.thread);
+ kthread_stop(dsi_1.thread);
err0:
return r;
}
void dsi_exit(void)
{
- kthread_stop(dsi.thread);
+ kthread_stop(dsi_1.thread);
- regulator_put(dsi.vdds_dsi_reg);
+ if (cpu_is_omap34xx())
+ regulator_put(dsi_1.vdds_dsi_reg);
- iounmap(dsi.base);
+ iounmap(dsi_1.base);
DSSDBG("omap_dsi_exit\n");
}
+int dsi2_init(struct platform_device *pdev)
+{
+ u32 rev;
+ int r;
+ enum dsi lcd_ix = dsi2;
+
+ struct sched_param param = {
+ .sched_priority = MAX_USER_RT_PRIO-1
+ };
+
+ spin_lock_init(&dsi_2.errors_lock);
+ dsi_2.errors = 0;
+
+ init_completion(&dsi_2.bta_completion);
+ init_completion(&dsi_2.update_completion);
+
+ dsi_2.thread = kthread_create(dsi2_update_thread, NULL, "dsi2");
+ if (IS_ERR(dsi_2.thread)) {
+ DSSERR("cannot create kthread\n");
+ r = PTR_ERR(dsi_2.thread);
+ goto err0;
+ }
+ sched_setscheduler(dsi_2.thread, SCHED_FIFO, &param);
+
+ init_waitqueue_head(&dsi_2.waitqueue);
+ spin_lock_init(&dsi_2.update_lock);
+
+ mutex_init(&dsi_2.lock);
+ mutex_init(&dsi_2.bus_lock);
+
+#ifdef DSI_CATCH_MISSING_TE
+ init_timer(&dsi_2.te_timer);
+ dsi_2.te_timer.function = dsi_te_timeout;
+ dsi_2.te_timer.data = 0;
+#endif
+ dsi_2.te_enabled = true;
+ dsi_2.update_mode = OMAP_DSS_UPDATE_DISABLED;
+ dsi_2.user_update_mode = OMAP_DSS_UPDATE_DISABLED;
+
+ dsi_2.base = ioremap(DSI2_BASE, 2000);
+ if (!dsi_2.base) {
+ DSSERR("can't ioremap DSI\n");
+ r = -ENOMEM;
+ goto err1;
+ }
+
+ r = request_irq(INT_44XX_DSS_DSI2_IRQ, dsi2_irq_handler,
+ 0, "OMAP DSI2", (void *)0);
+
+ if (cpu_is_omap34xx()) {
+ dsi_2.vdds_dsi_reg = regulator_get(&pdev->dev, "vdds_dsi");
+ if (IS_ERR(dsi_2.vdds_dsi_reg)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ r = PTR_ERR(dsi_2.vdds_dsi_reg);
+ goto err2;
+ }
+ }
+
+ enable_clocks(1);
+
+ rev = dsi_read_reg(lcd_ix, DSI_REVISION);
+ printk(KERN_INFO "OMAP DSI2 rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ enable_clocks(0);
+
+ wake_up_process(dsi_2.thread);
+
+ return 0;
+err2:
+ iounmap(dsi_2.base);
+err1:
+ kthread_stop(dsi_2.thread);
+err0:
+ return r;
+}
+
+void dsi2_exit(void)
+{
+ kthread_stop(dsi_2.thread);
+
+ iounmap(dsi_2.base);
+
+ DSSDBG("omap_dsi2_exit\n");
+}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 9b05ee65a15d..2b737854d7bd 100644..100755
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -29,11 +29,24 @@
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
+#include <linux/i2c/twl.h>
#include <plat/display.h>
+#include <plat/omap_hwmod.h>
+
#include "dss.h"
-#define DSS_BASE 0x48050000
+#ifndef CONFIG_ARCH_OMAP4
+/* DSS */
+#define DSS_BASE 0x48050000
+/* DISPLAY CONTROLLER */
+#define DISPC_BASE 0x48050400
+#else
+/* DSS */
+#define DSS_BASE 0x58000000
+/* DISPLAY CONTROLLER */
+#define DISPC_BASE 0x58001000
+#endif
#define DSS_SZ_REGS SZ_512
@@ -46,12 +59,17 @@ struct dss_reg {
#define DSS_REVISION DSS_REG(0x0000)
#define DSS_SYSCONFIG DSS_REG(0x0010)
#define DSS_SYSSTATUS DSS_REG(0x0014)
-#define DSS_IRQSTATUS DSS_REG(0x0018)
-#define DSS_CONTROL DSS_REG(0x0040)
+#define DSS_CONTROL DSS_REG(0x0040)
#define DSS_SDI_CONTROL DSS_REG(0x0044)
#define DSS_PLL_CONTROL DSS_REG(0x0048)
#define DSS_SDI_STATUS DSS_REG(0x005C)
+#ifdef CONFIG_ARCH_OMAP4
+#define DSS_STATUS DSS_REG(0x005C)
+#endif
+void test(void);
+
+
#define REG_GET(idx, start, end) \
FLD_GET(dss_read_reg(idx), start, end)
@@ -71,6 +89,10 @@ static struct {
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;
+void __iomem *dss_base;
+void __iomem *dispc_base;
+EXPORT_SYMBOL(dispc_base);
+
static int _omap_dss_wait_reset(void);
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
@@ -237,7 +259,6 @@ void dss_dump_regs(struct seq_file *s)
DUMPREG(DSS_REVISION);
DUMPREG(DSS_SYSCONFIG);
DUMPREG(DSS_SYSSTATUS);
- DUMPREG(DSS_IRQSTATUS);
DUMPREG(DSS_CONTROL);
DUMPREG(DSS_SDI_CONTROL);
DUMPREG(DSS_PLL_CONTROL);
@@ -252,10 +273,29 @@ void dss_select_clk_source(bool dsi, bool dispc)
u32 r;
r = dss_read_reg(DSS_CONTROL);
r = FLD_MOD(r, dsi, 1, 1); /* DSI_CLK_SWITCH */
+ if (cpu_is_omap44xx())
+ r = FLD_MOD(r, dsi, 10, 10); /* DSI2_CLK_SWITCH */
r = FLD_MOD(r, dispc, 0, 0); /* DISPC_CLK_SWITCH */
+ /* TODO: extend for LCD2 and HDMI */
dss_write_reg(DSS_CONTROL, r);
}
+#ifdef CONFIG_ARCH_OMAP4
+void dss_select_clk_source_dsi(enum dsi lcd_ix, bool dsi, bool lcd)
+{
+ u32 r;
+ r = dss_read_reg(DSS_CONTROL);
+ if (lcd_ix == dsi1) {
+ r = FLD_MOD(r, dsi, 1, 1); /* DSI_CLK_SWITCH */
+ r = FLD_MOD(r, lcd, 0, 0); /* LCD1_CLK_SWITCH */
+ } else {
+ r = FLD_MOD(r, dsi, 10, 10); /* DSI2_CLK_SWITCH */
+ r = FLD_MOD(r, lcd, 12, 12); /* LCD2_CLK_SWITCH */
+ }
+ dss_write_reg(DSS_CONTROL, r);
+}
+#endif
+
int dss_get_dsi_clk_source(void)
{
return FLD_GET(dss_read_reg(DSS_CONTROL), 1, 1);
@@ -408,9 +448,10 @@ retry:
goto found;
}
}
- } else {
- BUG();
- }
+ } else if (cpu_is_omap34xx()) {
+ ; /* do nothing for now */
+ } else
+ BUG();
found:
if (!match) {
@@ -451,17 +492,10 @@ static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
{
- u32 irqstatus;
-
- irqstatus = dss_read_reg(DSS_IRQSTATUS);
-
- if (irqstatus & (1<<0)) /* DISPC_IRQ */
- dispc_irq_handler();
-#ifdef CONFIG_OMAP2_DSS_DSI
- if (irqstatus & (1<<1)) /* DSI_IRQ */
- dsi_irq_handler();
-#endif
-
+ /* INT_24XX_DSS_IRQ is dedicated to the DISPC interrupt request only */
+ /* DSI1, DSI2 and HDMI are to be handled in separate handlers */
+ dispc_irq_handler();
+ /* No DSI-specific IRQ handler implemented yet */
return IRQ_HANDLED;
}
@@ -482,9 +516,7 @@ static int _omap_dss_wait_reset(void)
static int _omap_dss_reset(void)
{
- /* Soft reset */
- REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
- return _omap_dss_wait_reset();
+ return 0;
}
void dss_set_venc_output(enum omap_dss_venc_type type)
@@ -507,23 +539,37 @@ void dss_set_dac_pwrdn_bgz(bool enable)
REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
}
+void dss_switch_tv_hdmi(int hdmi)
+{
+ REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* 0x1 for HDMI, 0x0 TV */
+ if (hdmi)
+ REG_FLD_MOD(DSS_CONTROL, 0, 9, 8);
+}
+
int dss_init(bool skip_init)
{
- int r;
+ int r, ret;
u32 rev;
+ u32 val;
+ u32 mmcdata2;
+
+
+ dss_base = dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
- dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
if (!dss.base) {
DSSERR("can't ioremap DSS\n");
r = -ENOMEM;
goto fail0;
}
+ test();
if (!skip_init) {
/* disable LCD and DIGIT output. This seems to fix the synclost
* problem that we get, if the bootloader starts the DSS and
* the kernel resets it */
- omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
+ /* omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440); */
+ omap_writel(omap_readl(0x48041040) & ~0x3, 0x48041040);
+
/* We need to wait here a bit, otherwise we sometimes start to
* get synclost errors, and after that only power cycle will
@@ -547,12 +593,18 @@ int dss_init(bool skip_init)
REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
#endif
+ if (!cpu_is_omap44xx()) {
r = request_irq(INT_24XX_DSS_IRQ,
cpu_is_omap24xx()
? dss_irq_handler_omap2
: dss_irq_handler_omap3,
0, "OMAP DSS", NULL);
+ } else {
+ r = request_irq(INT_44XX_DSS_IRQ,
+ dss_irq_handler_omap3,
+ 0, "OMAP DSS", (void *)1);
+ }
if (r < 0) {
DSSERR("omap2 dss: request_irq failed\n");
@@ -574,6 +626,7 @@ int dss_init(bool skip_init)
printk(KERN_INFO "OMAP DSS rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
return 0;
fail2:
@@ -588,9 +641,36 @@ void dss_exit(void)
{
if (cpu_is_omap34xx())
clk_put(dss.dpll4_m4_ck);
-
+#ifndef CONFIG_ARCH_OMAP4
free_irq(INT_24XX_DSS_IRQ, NULL);
+#else
+ free_irq(INT_44XX_DSS_IRQ, NULL);
+#endif
iounmap(dss.base);
}
+
+void test(void)
+{
+ void __iomem *b, *c;
+ /*a = ioremap(0x58000000, 0x60);*/
+ b = ioremap(0x4A009100, 0x30);
+ c = ioremap(0x4a307100, 0x10);
+
+ if (!b)
+ return;
+ /*printk(KERN_INFO "dss status 0x%x 0x%x\n", __raw_readl(a+0x5c), (a+0x5c));*/
+ printk(KERN_INFO "CM_DSS_CLKSTCTRL 0x%x 0x%x\n", __raw_readl(b), b);
+ printk(KERN_INFO "CM_DSS_DSS_CLKCTRL 0x%x 0x%x\n", __raw_readl(b+0x20), (b+0x20));
+ if (!c)
+ return;
+ printk(KERN_INFO "PM DSS wrst 0x%x 0x%x\n", __raw_readl(c+0x4), (c+0x4));
+
+}
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 8da5ac42151b..0deb6e1c5f21 100644..100755
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -100,9 +100,9 @@ extern unsigned int dss_debug;
#define DISPC_MAX_FCK 173000000
enum omap_burst_size {
- OMAP_DSS_BURST_4x32 = 0,
- OMAP_DSS_BURST_8x32 = 1,
- OMAP_DSS_BURST_16x32 = 2,
+ OMAP_DSS_BURST_4x32 = 0, /*OMAP_DSS_BURST_2x128 in OMAP4*/
+ OMAP_DSS_BURST_8x32 = 1, /*OMAP_DSS_BURST_4x128 in OMAP4*/
+ OMAP_DSS_BURST_16x32 = 2, /*OMAP_DSS_BURST_8x128 in OMAP4*/
};
enum omap_parallel_interface_mode {
@@ -157,6 +157,9 @@ struct dsi_clock_info {
u8 highfreq;
bool use_dss2_fck;
+#ifdef CONFIG_ARCH_OMAP4
+ bool use_dss2_sys_clk;
+#endif
};
struct seq_file;
@@ -196,7 +199,8 @@ void dss_start_update(struct omap_dss_device *dssdev);
/* overlay */
void dss_init_overlays(struct platform_device *pdev);
void dss_uninit_overlays(struct platform_device *pdev);
-int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev);
+int dss_check_overlay(struct omap_overlay *ovl,
+ struct omap_dss_device *dssdev);
void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
#ifdef L4_EXAMPLE
void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr);
@@ -207,6 +211,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
int dss_init(bool skip_init);
void dss_exit(void);
+void dss_switch_tv_hdmi(int hdmi);
+
void dss_save_context(void);
void dss_restore_context(void);
@@ -217,6 +223,8 @@ int dss_sdi_enable(void);
void dss_sdi_disable(void);
void dss_select_clk_source(bool dsi, bool dispc);
+void dss_select_clk_source_dsi(enum dsi lcd_ix, bool dsi, bool lcd);
+
int dss_get_dsi_clk_source(void);
int dss_get_dispc_clk_source(void);
void dss_set_venc_output(enum omap_dss_venc_type type);
@@ -239,25 +247,31 @@ int sdi_init_display(struct omap_dss_device *display);
int dsi_init(struct platform_device *pdev);
void dsi_exit(void);
-void dsi_dump_clocks(struct seq_file *s);
-void dsi_dump_regs(struct seq_file *s);
+void dsi_dump_clocks(enum dsi lcd_ix, struct seq_file *s);
+void dsi_dump_regs(enum dsi lcd_ix, struct seq_file *s);
void dsi_save_context(void);
void dsi_restore_context(void);
int dsi_init_display(struct omap_dss_device *display);
-void dsi_irq_handler(void);
-unsigned long dsi_get_dsi1_pll_rate(void);
-int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo);
-int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
- struct dsi_clock_info *cinfo,
+#ifndef CONFIG_ARCH_OMAP4
+void dsi_irq_handler(enum dsi lcd_ix);
+#endif
+unsigned long dsi_get_dsi1_pll_rate(enum dsi lcd_ix);
+int dsi_pll_set_clock_div(enum dsi lcd_ix, struct dsi_clock_info *cinfo);
+int dsi_pll_calc_clock_div_pck(enum dsi lcd_ix, bool is_tft,
+ unsigned long req_pck, struct dsi_clock_info *cinfo,
struct dispc_clock_info *dispc_cinfo);
-int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
- bool enable_hsdiv);
-void dsi_pll_uninit(void);
+int dsi_pll_init(enum dsi lcd_ix, struct omap_dss_device *dssdev,
+ bool enable_hsclk, bool enable_hsdiv);
+void dsi_pll_uninit(enum dsi lcd_ix);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
u32 fifo_size, enum omap_burst_size *burst_size,
u32 *fifo_low, u32 *fifo_high);
+int dsi_calc_clock_rates(struct dsi_clock_info *cinfo);
+/* DSI2 */
+int dsi2_init(struct platform_device *pdev);
+void dsi2_exit(void);
/* DPI */
int dpi_init(void);
@@ -270,7 +284,7 @@ void dispc_exit(void);
void dispc_dump_clocks(struct seq_file *s);
void dispc_dump_regs(struct seq_file *s);
void dispc_irq_handler(void);
-void dispc_fake_vsync_irq(void);
+void dispc_fake_vsync_irq(int disp_id);
void dispc_save_context(void);
void dispc_restore_context(void);
@@ -283,16 +297,29 @@ void dispc_lcd_enable_signal(bool enable);
void dispc_pck_free_enable(bool enable);
void dispc_enable_fifohandcheck(bool enable);
-void dispc_set_lcd_size(u16 width, u16 height);
+void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
void dispc_set_digit_size(u16 width, u16 height);
u32 dispc_get_plane_fifo_size(enum omap_plane plane);
void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high);
void dispc_enable_fifomerge(bool enable);
void dispc_set_burst_size(enum omap_plane plane,
enum omap_burst_size burst_size);
+void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
+ u16 pck_div);
void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr);
void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr);
+#ifdef CONFIG_ARCH_OMAP4
+void dispc_set_plane_ba_uv0(enum omap_plane plane, u32 paddr);
+void dispc_set_plane_ba_uv1(enum omap_plane plane, u32 paddr);
+void dispc_set_zorder(enum omap_plane plane,
+ enum omap_overlay_zorder zorder);
+void dispc_enable_zorder(enum omap_plane plane, bool enable);
+void dispc_enable_preload(enum omap_plane plane, bool enable);
+void dispc_enable_gamma_table(bool enable);
+void dispc_set_idle_mode(void);
+#endif
+
void dispc_set_plane_pos(enum omap_plane plane, u16 x, u16 y);
void dispc_set_plane_size(enum omap_plane plane, u16 width, u16 height);
void dispc_set_channel_out(enum omap_plane plane,
@@ -307,18 +334,24 @@ int dispc_setup_plane(enum omap_plane plane,
bool ilace,
enum omap_dss_rotation_type rotation_type,
u8 rotation, bool mirror,
- u8 global_alpha);
+ u8 global_alpha, enum omap_channel channel
+#ifdef CONFIG_ARCH_OMAP4
+ , u32 p_uv_addr
+#endif
+ );
bool dispc_go_busy(enum omap_channel channel);
void dispc_go(enum omap_channel channel);
-void dispc_enable_lcd_out(bool enable);
+void dispc_enable_lcd_out(enum omap_channel channel, bool enable);
void dispc_enable_digit_out(bool enable);
int dispc_enable_plane(enum omap_plane plane, bool enable);
void dispc_enable_replication(enum omap_plane plane, bool enable);
-void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode);
-void dispc_set_tft_data_lines(u8 data_lines);
-void dispc_set_lcd_display_type(enum omap_lcd_display_type type);
+void dispc_set_parallel_interface_mode(enum omap_channel channel,
+ enum omap_parallel_interface_mode mode);
+void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
+void dispc_set_lcd_display_type(enum omap_channel channel,
+ enum omap_lcd_display_type type);
void dispc_set_loadmode(enum omap_dss_load_mode mode);
void dispc_set_default_color(enum omap_channel channel, u32 color);
@@ -335,16 +368,19 @@ bool dispc_trans_key_enabled(enum omap_channel ch);
bool dispc_alpha_blending_enabled(enum omap_channel ch);
bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
-void dispc_set_lcd_timings(struct omap_video_timings *timings);
+void dispc_set_lcd_timings(enum omap_channel channel,
+ struct omap_video_timings *timings);
unsigned long dispc_fclk_rate(void);
-unsigned long dispc_lclk_rate(void);
-unsigned long dispc_pclk_rate(void);
-void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb);
+unsigned long dispc_lclk_rate(enum omap_channel channel);
+unsigned long dispc_pclk_rate(enum omap_channel channel);
+void dispc_set_pol_freq(enum omap_channel channel,
+ enum omap_panel_config config, u8 acbi, u8 acb);
void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
struct dispc_clock_info *cinfo);
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
-int dispc_set_clock_div(struct dispc_clock_info *cinfo);
+int dispc_set_clock_div(enum omap_channel channel,
+ struct dispc_clock_info *cinfo);
int dispc_get_clock_div(struct dispc_clock_info *cinfo);
@@ -367,4 +403,9 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
unsigned long rfbi_get_max_tx_rate(void);
int rfbi_init_display(struct omap_dss_device *display);
+/* HDMI*/
+int hdmi_init(struct platform_device *pdev, int code);
+void hdmi_exit(void);
+void hdmi_dump_regs(struct seq_file *s);
+int hdmi_init_display(struct omap_dss_device *display);
#endif
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
new file mode 100644
index 000000000000..d44021be018a
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -0,0 +1,1105 @@
+/*
+ * linux/drivers/video/omap2/dss/hdmi.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * HDMI settings from TI's DSS driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "HDMI"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <plat/display.h>
+#include <plat/cpu.h>
+#include <plat/hdmi_lib.h>
+#include <plat/gpio.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+#define HDMI_PLLCTRL 0x58006200
+#define HDMI_PHY 0x58006300
+
+/* PLL */
+#define PLLCTRL_PLL_CONTROL 0x0ul
+#define PLLCTRL_PLL_STATUS 0x4ul
+#define PLLCTRL_PLL_GO 0x8ul
+#define PLLCTRL_CFG1 0xCul
+#define PLLCTRL_CFG2 0x10ul
+#define PLLCTRL_CFG3 0x14ul
+#define PLLCTRL_CFG4 0x20ul
+
+/* HDMI PHY */
+#define HDMI_TXPHY_TX_CTRL 0x0ul
+#define HDMI_TXPHY_DIGITAL_CTRL 0x4ul
+#define HDMI_TXPHY_POWER_CTRL 0x8ul
+#define HDMI_TXPHY_PAD_CFG_CTRL 0xCul
+
+static int hdmi_read_edid(struct omap_video_timings *);
+static int get_edid_timing_data(u8 *edid, u16 *pixel_clk, u16 *horizontal_res,
+ u16 *vertical_res);
+/* CEA-861-D Codes */
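+/*
+ * The positional initialisers below follow struct omap_video_timings as
+ * declared in plat/display.h: x_res, y_res, pixel_clock (kHz), hsw, hfp,
+ * hbp, vsw, vfp, vbp.  E.g. cea861d1 is 640x480 with a 25.2 MHz pixel
+ * clock (standard VGA timing).
+ */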
+const struct omap_video_timings cea861d1 = \
+ {640, 480, 25200, 96, 16, 48, 2, 10, 33};
+const struct omap_video_timings cea861d4 = \
+ {1280, 720, 74250, 40, 110, 220, 5, 5, 20};
+const struct omap_video_timings cea861d16 = \
+ {1920, 1080, 148500, 44, 88, 148, 5, 4, 36};
+const struct omap_video_timings cea861d17 = \
+ {720, 576, 27000, 64, 12, 68, 5, 5, 39};
+const struct omap_video_timings cea861d18 = \
+ {720, 576, 27000, 64, 12, 68, 5, 5, 39};
+const struct omap_video_timings cea861d29 = \
+ {1440, 576, 54000, 128, 24, 136, 5, 5, 39};
+const struct omap_video_timings cea861d30 = \
+ {1440, 576, 54000, 128, 24, 136, 5, 5, 39};
+const struct omap_video_timings cea861d31 = \
+ {1920, 1080, 148500, 44, 528, 148, 5, 4, 36};
+const struct omap_video_timings cea861d35 = \
+ {2880, 480, 108000, 248, 64, 240, 6, 9, 30};
+const struct omap_video_timings cea861d36 = \
+ {2880, 480, 108000, 248, 64, 240, 6, 9, 30};
+const struct omap_video_timings vesad10 = \
+ {1024, 768, 65000, 136, 24, 160, 6, 3, 29};
+const struct omap_video_timings vesadmti39 = \
+ {1680, 1050, 119000, 32, 48, 80, 6, 3, 21};
+
+static struct {
+ void __iomem *base_phy;
+ void __iomem *base_pll;
+ struct mutex lock;
+ int code;
+ HDMI_Timing_t ti;
+} hdmi;
+struct omap_video_timings edid_timings;
+
+static inline void hdmi_write_reg(u32 base, u16 idx, u32 val)
+{
+ void __iomem *b;
+
+ switch (base) {
+ case HDMI_PHY:
+ b = hdmi.base_phy;
+ break;
+ case HDMI_PLLCTRL:
+ b = hdmi.base_pll;
+ break;
+ default:
+ BUG();
+ }
+ __raw_writel(val, b + idx);
+ /* DBG("write = 0x%x idx =0x%x\r\n", val, idx); */
+}
+
+static inline u32 hdmi_read_reg(u32 base, u16 idx)
+{
+ void __iomem *b;
+ u32 l;
+
+ switch (base) {
+ case HDMI_PHY:
+ b = hdmi.base_phy;
+ break;
+ case HDMI_PLLCTRL:
+ b = hdmi.base_pll;
+ break;
+ default:
+ BUG();
+ }
+ l = __raw_readl(b + idx);
+
+ /* DBG("addr = 0x%p rd = 0x%x idx = 0x%x\r\n", (b+idx), l, idx); */
+ return l;
+}
+
+#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
+#define FLD_MOD(orig, val, start, end) \
+ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+
+#define REG_FLD_MOD(b, i, v, s, e) \
+ hdmi_write_reg(b, i, FLD_MOD(hdmi_read_reg(b, i), v, s, e))
+
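+/*
+ * For reference (assuming the usual FLD_MASK/FLD_VAL helpers from the DSS
+ * headers): FLD_MOD(reg, val, start, end) returns reg with bits [start:end]
+ * replaced by val, and REG_FLD_MOD() does the same as a read-modify-write
+ * of the register.  E.g. REG_FLD_MOD(phy, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0)
+ * below rewrites only bits 3..0 of the PHY power control register.
+ */
+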
+/*
+ * refclk = (sys_clk/(highfreq+1))/(n+1)
+ * so refclk = 38.4/2/(n+1) = 19.2/(n+1)
+ * choose n = 15, makes refclk = 1.2
+ *
+ * m = tclk/(cpf * refclk) = tclk/(2 * 1.2)
+ *
+ * for clkin = 38.4/2 = 19.2
+ * phy = 2520
+ *
+ * m = 2520*16/(2*19.2) = 1050;
+ *
+ * for clkin = 38.4
+ * phy = 2520
+ *
+ */
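+
+/*
+ * Summarising what compute_pll() below does (a reading of the code, not a
+ * TRM quote; clkin/phy appear to use the 10 kHz scaling implied by the
+ * "clkin > 3200" check):
+ *   refclk = clkin / (n + 1)          for clkin <= 32 MHz
+ *   refclk = clkin / (2 * (n + 1))    for clkin  > 32 MHz
+ *   regm   = phy / (CPF * refclk)     (integer part)
+ *   regmf  = the remaining fraction, scaled by 2^18 (262144)
+ * dcofreq/regsd/regm4 switch to the high-frequency DCO settings once phy
+ * goes above 1 GHz (the "phy > 1000 * 100" check).
+ */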
+
+#define CPF 2
+
+struct hdmi_pll_info {
+ u16 regn;
+ u16 regm;
+ u32 regmf;
+ u16 regm4; /* M4_CLOCK_DIV */
+ u16 regm2;
+ u16 regsd;
+ u16 dcofreq;
+};
+
+static void compute_pll(int clkin, int phy,
+ int n, struct hdmi_pll_info *pi)
+{
+ int refclk;
+ u32 temp, mf;
+
+ if (clkin > 3200) /* 32 MHz */
+ refclk = clkin / (2 * (n + 1));
+ else
+ refclk = clkin / (n + 1);
+
+ temp = phy * 100/(CPF * refclk);
+
+ pi->regn = n;
+ pi->regm = temp/100;
+ pi->regm2 = 1;
+
+ mf = (phy - pi->regm * CPF * refclk) * 262144;
+ pi->regmf = mf/(CPF * refclk);
+
+ if (phy > 1000 * 100) {
+ pi->regm4 = phy / 10000;
+ pi->dcofreq = 1;
+ pi->regsd = ((pi->regm * 384)/((n + 1) * 250) + 5)/10;
+ } else {
+ pi->regm4 = 1;
+ pi->dcofreq = 0;
+ pi->regsd = 0;
+ }
+
+ DSSDBG("M = %d Mf = %d, m4= %d\n", pi->regm, pi->regmf, pi->regm4);
+ DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
+}
+
+static int hdmi_pll_init(int refsel, int dcofreq, struct hdmi_pll_info *fmt, u16 sd)
+{
+ u32 r;
+ unsigned t = 500000;
+ u32 pll = HDMI_PLLCTRL;
+
+ /* PLL start always use manual mode */
+ REG_FLD_MOD(pll, PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
+
+ r = hdmi_read_reg(pll, PLLCTRL_CFG1);
+ r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1__PLL_REGM */
+ r = FLD_MOD(r, fmt->regn, 8, 1); /* CFG1__PLL_REGN */
+ r = FLD_MOD(r, fmt->regm4, 25, 21); /* M4_CLOCK_DIV */
+
+ hdmi_write_reg(pll, PLLCTRL_CFG1, r);
+
+ r = hdmi_read_reg(pll, PLLCTRL_CFG2);
+
+ /* SYS w/o divide by 2 [22:21] = don't care [11:11] = 0x0 */
+ /* SYS divide by 2 [22:21] = 0x3 [11:11] = 0x1 */
+ /* PCLK, REF1 or REF2 [22:21] = 0x0, 0x1 or 0x2 [11:11] = 0x1 */
+ r = FLD_MOD(r, 0x0, 11, 11); /* PLL_CLKSEL 1: PLL 0: SYS*/
+ r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
+ r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
+ r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
+ r = FLD_MOD(r, 0x1, 20, 20); /* HSDIVBYPASS assert during locking */
+ r = FLD_MOD(r, refsel, 22, 21); /* REFSEL */
+ /* DPLL3 used by DISPC or HDMI itself*/
+ r = FLD_MOD(r, 0x0, 17, 17); /* M4_CLOCK_PWDN */
+ r = FLD_MOD(r, 0x1, 16, 16); /* M4_CLOCK_EN */
+
+ if (dcofreq) {
+ /* divider programming for 1080p */
+ REG_FLD_MOD(pll, PLLCTRL_CFG3, sd, 17, 10);
+ r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
+ } else
+ r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
+
+ hdmi_write_reg(pll, PLLCTRL_CFG2, r);
+
+ r = hdmi_read_reg(pll, PLLCTRL_CFG4);
+ r = FLD_MOD(r, 0, 24, 18); /* todo: M2 */
+ r = FLD_MOD(r, fmt->regmf, 17, 0);
+
+ /* go now */
+ REG_FLD_MOD(pll, PLLCTRL_PLL_GO, 0x1ul, 0, 0);
+
+ /* wait for bit change */
+ while (FLD_GET(hdmi_read_reg(pll, PLLCTRL_PLL_GO), 0, 0))
+ ;
+
+ /* Wait till the lock bit is set */
+ /* read PLL status */
+ while (0 == FLD_GET(hdmi_read_reg(pll, PLLCTRL_PLL_STATUS), 1, 1)) {
+ udelay(1);
+ if (!--t) {
+ printk(KERN_WARNING "HDMI: cannot lock PLL\n");
+ DSSDBG("CFG1 0x%x\n", hdmi_read_reg(pll, PLLCTRL_CFG1));
+ DSSDBG("CFG2 0x%x\n", hdmi_read_reg(pll, PLLCTRL_CFG2));
+ DSSDBG("CFG4 0x%x\n", hdmi_read_reg(pll, PLLCTRL_CFG4));
+ return -EIO;
+ }
+ }
+
+ DSSDBG("PLL locked!\n");
+
+ r = hdmi_read_reg(pll, PLLCTRL_CFG2);
+ r = FLD_MOD(r, 0, 0, 0); /* PLL_IDLE */
+ r = FLD_MOD(r, 0, 5, 5); /* PLL_PLLLPMODE */
+ r = FLD_MOD(r, 0, 6, 6); /* PLL_LOWCURRSTBY */
+ r = FLD_MOD(r, 0, 8, 8); /* PLL_DRIFTGUARDEN */
+ r = FLD_MOD(r, 0, 10, 9); /* PLL_LOCKSEL */
+ r = FLD_MOD(r, 1, 13, 13); /* PLL_REFEN */
+ r = FLD_MOD(r, 1, 14, 14); /* PHY_CLKINEN */
+ r = FLD_MOD(r, 0, 15, 15); /* BYPASSEN */
+ r = FLD_MOD(r, 0, 20, 20); /* HSDIVBYPASS */
+ hdmi_write_reg(pll, PLLCTRL_CFG2, r);
+
+ return 0;
+}
+
+static int hdmi_pll_reset(void)
+{
+ int t = 0;
+
+ /* SYSRESET controlled by power FSM */
+ REG_FLD_MOD(HDMI_PLLCTRL, PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
+
+ /* READ 0x0 reset is in progress */
+ while (!FLD_GET(hdmi_read_reg(HDMI_PLLCTRL,
+ PLLCTRL_PLL_STATUS), 0, 0)) {
+ udelay(1);
+ if (t++ > 1000) {
+ ERR("Failed to sysrest PLL\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+int hdmi_pll_program(struct hdmi_pll_info *fmt)
+{
+ u32 r;
+ int refsel;
+
+ HDMI_PllPwr_t PllPwrWaitParam;
+
+ /* wait for wrapper reset */
+ HDMI_W1_SetWaitSoftReset();
+
+ /* power off PLL */
+ PllPwrWaitParam = HDMI_PLLPWRCMD_ALLOFF;
+ r = HDMI_W1_SetWaitPllPwrState(HDMI_WP,
+ PllPwrWaitParam);
+ if (r)
+ return r;
+
+ /* power on PLL */
+ PllPwrWaitParam = HDMI_PLLPWRCMD_BOTHON_ALLCLKS;
+ r = HDMI_W1_SetWaitPllPwrState(HDMI_WP,
+ PllPwrWaitParam);
+ if (r)
+ return r;
+
+ hdmi_pll_reset();
+
+ refsel = 0x3; /* select SYSCLK reference */
+
+ r = hdmi_pll_init(refsel, fmt->dcofreq, fmt, fmt->regsd);
+
+ return r;
+}
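+
+/*
+ * Illustrative call sequence for the two helpers above (a sketch only;
+ * "phy_clk" is a made-up variable, and clkin uses the same scaling as the
+ * "clkin > 3200" check in compute_pll()):
+ *
+ *   struct hdmi_pll_info pll;
+ *
+ *   compute_pll(3840, phy_clk, 15, &pll);   (38.4 MHz sys clock, n = 15)
+ *   if (hdmi_pll_program(&pll))
+ *           DSSERR("HDMI PLL programming failed\n");
+ */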
+
+/* double check the order */
+static int hdmi_phy_init(u32 w1,
+ u32 phy)
+{
+ u32 count;
+ int r;
+
+ /* wait till PHY_PWR_STATUS=LDOON */
+ /* HDMI_PHYPWRCMD_LDOON = 1 */
+ r = HDMI_W1_SetWaitPhyPwrState(w1, 1);
+ if (r)
+ return r;
+
+ /* wait till PHY_PWR_STATUS=TXON */
+ r = HDMI_W1_SetWaitPhyPwrState(w1, 2);
+ if (r)
+ return r;
+
+ /* read address 0 in order to get the SCPreset done completed */
+ /* Dummy access performed to solve resetdone issue */
+ hdmi_read_reg(phy, HDMI_TXPHY_TX_CTRL);
+
+ /* write to phy address 0 to configure the clock */
+ /* use HFBITCLK write HDMI_TXPHY_TX_CONTROL__FREQOUT field */
+ REG_FLD_MOD(phy, HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
+
+ /* write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
+ hdmi_write_reg(phy, HDMI_TXPHY_DIGITAL_CTRL,
+ 0xF0000000);
+
+ /* setup max LDO voltage */
+ REG_FLD_MOD(phy, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
+ /* write to phy address 3 to change the polarity control */
+ REG_FLD_MOD(phy, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
+
+ count = 0;
+ while (count++ < 1000)
+ ; /* crude settling delay before returning */
+
+ return 0;
+}
+
+static int hdmi_phy_off(u32 name)
+{
+ int r = 0;
+ u32 count;
+
+ /* wait till PHY_PWR_STATUS=OFF */
+ /* HDMI_PHYPWRCMD_OFF = 0 */
+ r = HDMI_W1_SetWaitPhyPwrState(name, 0);
+ if (r)
+ return r;
+
+ count = 0;
+ while (count++ < 200)
+ ; /* crude settling delay before returning */
+
+ return 0;
+}
+
+/* driver */
+static int hdmi_panel_probe(struct omap_dss_device *dssdev)
+{
+ DSSDBG("ENTER hdmi_panel_probe()\n");
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT |
+ OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS;
+ switch (hdmi.code) {
+ case 1:
+ case 11:
+ dssdev->panel.timings = cea861d1;
+ break;
+ case 4:
+ dssdev->panel.timings = cea861d4;
+ break;
+ case 10:
+ dssdev->panel.timings = vesad10;
+ break;
+ case 30:
+ dssdev->panel.timings = cea861d30;
+ break;
+ case 39:
+ dssdev->panel.timings = vesadmti39;
+ break;
+ case 16:
+ default:
+ dssdev->panel.timings = cea861d16;
+ }
+
+ return 0;
+}
+
+static void hdmi_panel_remove(struct omap_dss_device *dssdev)
+{
+}
+
+static int hdmi_panel_enable(struct omap_dss_device *dssdev)
+{
+ return 0;
+}
+
+static void hdmi_panel_disable(struct omap_dss_device *dssdev)
+{
+}
+
+static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
+{
+ return 0;
+}
+
+static int hdmi_panel_resume(struct omap_dss_device *dssdev)
+{
+ return 0;
+}
+
+static void hdmi_enable_clocks(int enable)
+{
+ if (enable)
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
+ DSS_CLK_96M);
+ else
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
+ DSS_CLK_96M);
+}
+
+static struct omap_dss_driver hdmi_driver = {
+ .probe = hdmi_panel_probe,
+ .remove = hdmi_panel_remove,
+
+ .enable = hdmi_panel_enable,
+ .disable = hdmi_panel_disable,
+ .suspend = hdmi_panel_suspend,
+ .resume = hdmi_panel_resume,
+
+ .driver = {
+ .name = "hdmi_panel",
+ .owner = THIS_MODULE,
+ },
+};
+/* driver end */
+
+int hdmi_init(struct platform_device *pdev, int code)
+{
+ DSSDBG("Enter hdmi_init()\n");
+
+ mutex_init(&hdmi.lock);
+
+ hdmi.base_pll = ioremap(HDMI_PLLCTRL, 64);
+ if (!hdmi.base_pll) {
+ ERR("can't ioremap pll\n");
+ return -ENOMEM;
+ }
+ hdmi.base_phy = ioremap(HDMI_PHY, 64);
+
+ if (!hdmi.base_phy) {
+ ERR("can't ioremap phy\n");
+ return -ENOMEM;
+ }
+ hdmi.code = code;
+ hdmi_enable_clocks(1);
+
+ hdmi_lib_init();
+
+ hdmi_enable_clocks(0);
+ return omap_dss_register_driver(&hdmi_driver);
+
+}
+
+void hdmi_exit(void)
+{
+ hdmi_lib_exit();
+
+ iounmap(hdmi.base_pll);
+ iounmap(hdmi.base_phy);
+}
+/* FIXME These are raw GPIO register writes. Standard GPIO calls are made
+ * in board-4430sdp.c, but since GPIO reads do not work correctly yet this
+ * workaround is retained and needs to be fixed. */
+static void hdmi_gpio_config(int enable)
+{
+ u32 val;
+
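+ /* NOTE (assumption): the absolute addresses below appear to be the OMAP4
+ * core pad-configuration registers (0x4A100xxx) and the GPIO bank
+ * registers (0x480551xx) for the HPD/CEC/DDC pads and GPIOs 41/60;
+ * they should become mux/gpiolib calls once GPIO reads work here. */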
+ if (enable) {
+ /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */
+ omap_writel(0x01180118, 0x4A100098);
+ /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */
+ omap_writel(0x01180118 , 0x4A10009C);
+ /* CONTROL_HDMI_TX_PHY */
+ omap_writel(0x10000000, 0x4A100610);
+
+ /* GPIO 41 line being muxed */
+ val = omap_readl(0x4A100060);
+ val = FLD_MOD(val, 3, 18, 16);
+ omap_writel(val, 0x4A100060);
+
+ /* GPIO 60 line being muxed */
+ val = omap_readl(0x4A100088);
+ val = FLD_MOD(val, 1, 19, 19);
+ val = FLD_MOD(val, 3, 2, 0);
+ omap_writel(0x3, 0x4A100088);
+
+ /* DATA_OUT */
+ val = omap_readl(0x4805513c);
+ val = FLD_MOD(val, 1, 29, 27);
+ val = FLD_MOD(val, 1, 10, 7);
+ omap_writel(val, 0x4805513c);
+
+ /* GPIO_OE */
+ val = omap_readl(0x48055134);
+ val = FLD_MOD(val, 0, 28, 28);
+ val = FLD_MOD(val, 0, 9, 9);
+ omap_writel(val, 0x48055134);
+
+ /* GPIO_SETDATAOUT */
+ val = omap_readl(0x48055194);
+ val = FLD_MOD(val, 1, 28, 28);
+ val = FLD_MOD(val, 1, 9, 9);
+ omap_writel(val, 0x48055194);
+
+ mdelay(120);
+ } else {
+ /* GPIO_OE */
+ val = omap_readl(0x48055134);
+ val = FLD_MOD(val, 1, 28, 28);
+ val = FLD_MOD(val, 1, 9, 9);
+ omap_writel(val, 0x48055134);
+ }
+}
+
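+/*
+ * Power-up sequence, as implemented below: configure the HDMI pads/GPIOs,
+ * read the EDID, program the PLL and the PHY, configure the HDMI core,
+ * then switch the digit/TV output path to HDMI and start the video frame.
+ */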
+static int hdmi_power_on(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+ int mode = 1;
+ struct omap_video_timings *p;
+ struct hdmi_pll_info pll_data;
+
+ int clkin, n, phy;
+
+ switch (hdmi.code) {
+ case 1:
+ case 11:
+ dssdev->panel.timings = cea861d1;
+ break;
+ case 4:
+ dssdev->panel.timings = cea861d4;
+ break;
+ case 10:
+ dssdev->panel.timings = vesad10;
+ break;
+ case 30:
+ dssdev->panel.timings = cea861d30;
+ break;
+ case 39:
+ dssdev->panel.timings = vesadmti39;
+ break;
+ case 16:
+ default:
+ dssdev->panel.timings = cea861d16;
+ }
+
+ if (hdmi.code == 1 || hdmi.code == 10)
+ mode = 0; /* DVI mode */
+
+ hdmi_enable_clocks(1);
+
+ /* FIXME Gpio config is retained and platform enable which does
+ standard GPIO calls is temporarily commented until GPIO read
+ problem is fixed */
+ hdmi_gpio_config(1);
+ /*if (dssdev->platform_enable)
+ dssdev->platform_enable(dssdev);*/
+
+ p = &dssdev->panel.timings;
+
+ r = hdmi_read_edid(p);
+ if (r) {
+ r = -EIO;
+ goto err;
+ }
+
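+ /* compute_pll() derives the PLL settings from the 38.4 MHz reference
+ * (clkin is given in 10 kHz units), the fixed divider n and the target
+ * pixel clock in kHz taken from the panel timings */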
+ clkin = 3840; /* 38.4 MHz reference, expressed in units of 10 kHz */
+ n = 15; /* divider kept constant for this calculation */
+ phy = p->pixel_clock;
+ compute_pll(clkin, phy, n, &pll_data);
+
+ HDMI_W1_StopVideoFrame(HDMI_WP);
+
+ dispc_enable_digit_out(0);
+
+ /* config the PLL and PHY first */
+ r = hdmi_pll_program(&pll_data);
+ if (r) {
+ DSSERR("Failed to lock PLL\n");
+ r = -EIO;
+ goto err;
+ }
+
+ r = hdmi_phy_init(HDMI_WP, HDMI_PHY);
+ if (r) {
+ DSSERR("Failed to start PHY\n");
+ r = -EIO;
+ goto err;
+ }
+
+ DSS_HDMI_CONFIG(hdmi.ti, hdmi.code, mode);
+
+ /* these settings are independent of overlays */
+ dss_switch_tv_hdmi(1);
+
+ /* bypass TV gamma table*/
+ dispc_enable_gamma_table(0);
+
+ /* do not fall into any sort of idle */
+ dispc_set_idle_mode();
+
+ /* tv size */
+ dispc_set_digit_size(dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res);
+
+ HDMI_W1_StartVideoFrame(HDMI_WP);
+
+ dispc_enable_digit_out(1);
+
+err:
+ return r;
+}
+
+static void hdmi_power_off(struct omap_dss_device *dssdev)
+{
+ HDMI_W1_StopVideoFrame(HDMI_WP);
+
+ dispc_enable_digit_out(0);
+
+ hdmi_phy_off(HDMI_WP);
+
+ HDMI_W1_SetWaitPllPwrState(HDMI_WP, HDMI_PLLPWRCMD_ALLOFF);
+
+ /* FIXME Gpio config is retained and platform disable which does
+ standard GPIO calls is temporarily commented until GPIO read
+ problem is fixed */
+ hdmi_gpio_config(0);
+ /*if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);*/
+
+ hdmi_enable_clocks(0);
+
+ /* reset to default */
+
+}
+
+static int hdmi_enable_display(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+ DSSDBG("ENTER hdmi_enable_display()\n");
+
+ mutex_lock(&hdmi.lock);
+
+ /* the tv overlay manager is shared*/
+ r = omap_dss_start_device(dssdev);
+ if (r) {
+ DSSERR("failed to start device\n");
+ goto err;
+ }
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ r = hdmi_power_on(dssdev);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err;
+ }
+
+err:
+ mutex_unlock(&hdmi.lock);
+ return r;
+
+}
+
+static void hdmi_disable_display(struct omap_dss_device *dssdev)
+{
+ DSSDBG("Enter hdmi_disable_display()\n");
+
+ mutex_lock(&hdmi.lock);
+ if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
+ goto end;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) {
+ /* suspended is the same as disabled with venc */
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+ goto end;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+ omap_dss_stop_device(dssdev);
+
+ hdmi_power_off(dssdev);
+
+ hdmi.code = 16; /* reset to the default code on disable, but not on suspend */
+end:
+ mutex_unlock(&hdmi.lock);
+}
+
+static int hdmi_display_suspend(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("hdmi_display_suspend\n");
+ mutex_lock(&hdmi.lock);
+ if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
+ goto end;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+ goto end;
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ omap_dss_stop_device(dssdev);
+
+ hdmi_power_off(dssdev);
+end:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static int hdmi_display_resume(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("hdmi_display_resume\n");
+ mutex_lock(&hdmi.lock);
+
+ /* the tv overlay manager is shared*/
+ r = omap_dss_start_device(dssdev);
+ if (r) {
+ DSSERR("failed to start device\n");
+ goto err;
+ }
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ r = hdmi_power_on(dssdev);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err;
+ }
+
+err:
+ mutex_unlock(&hdmi.lock);
+
+ return r;
+}
+
+static void hdmi_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+
+static void hdmi_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ DSSDBG("hdmi_set_timings\n");
+
+ dssdev->panel.timings = *timings;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ /* turn the hdmi off and on to get new timings to use */
+ hdmi_disable_display(dssdev);
+ hdmi_enable_display(dssdev);
+ }
+}
+
+static int hdmi_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ DSSDBG("hdmi_check_timings\n");
+
+ if (memcmp(&dssdev->panel.timings, timings, sizeof(*timings)) == 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+int hdmi_init_display(struct omap_dss_device *dssdev)
+{
+ DSSDBG("init_display\n");
+
+ dssdev->enable = hdmi_enable_display;
+ dssdev->disable = hdmi_disable_display;
+ dssdev->suspend = hdmi_display_suspend;
+ dssdev->resume = hdmi_display_resume;
+ dssdev->get_timings = hdmi_get_timings;
+ dssdev->set_timings = hdmi_set_timings;
+ dssdev->check_timings = hdmi_check_timings;
+
+ return 0;
+}
+
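+/*
+ * Read the EDID and look for a detailed timing descriptor matching the
+ * current default resolution. If none is found, retry assuming 720p and
+ * choose the 50 Hz or 60 Hz CEA code from the horizontal front porch;
+ * the 1080p codes (24/50/60 Hz) are refined the same way further below.
+ */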
+static int hdmi_read_edid(struct omap_video_timings *dp)
+{
+ int r = 0;
+ u8 edid[HDMI_EDID_MAX_LENGTH];
+ u16 horizontal_res;
+ u16 vertical_res;
+ u16 pixel_clk;
+ struct omap_video_timings *tp;
+
+ memset(edid, 0, HDMI_EDID_MAX_LENGTH);
+ tp = dp;
+
+ if (HDMI_CORE_DDC_READEDID(HDMI_CORE_SYS, edid) != 0) {
+ printk(KERN_WARNING "HDMI failed to read E-EDID\n");
+ } else {
+ edid_timings.pixel_clock = dp->pixel_clock;
+ edid_timings.x_res = dp->x_res;
+ edid_timings.y_res = dp->y_res;
+ /* search for timings of default resolution */
+ if (get_edid_timing_data(edid, &pixel_clk,
+ &horizontal_res, &vertical_res)) {
+ dp->pixel_clock = pixel_clk * 10; /* EDID gives the clock in 10 kHz units */
+ tp = &edid_timings;
+ } else {
+ edid_timings.pixel_clock =
+ cea861d4.pixel_clock;
+ edid_timings.x_res = cea861d4.x_res;
+ edid_timings.y_res = cea861d4.y_res;
+ if (get_edid_timing_data(edid,
+ &pixel_clk, &horizontal_res,
+ &vertical_res)) {
+ dp->pixel_clock = pixel_clk * 10;
+ dp->x_res = horizontal_res;
+ dp->y_res = vertical_res;
+ tp = &edid_timings;
+ if (tp->hfp == 440)
+ hdmi.code = 19; /* 720p 50 Hz */
+ else /* 720p 60 Hz */
+ hdmi.code = 4;
+ }
+ }
+
+ if (tp->x_res == cea861d16.x_res)
+ switch (tp->hfp) {
+ case 628: /* 1080p 24Hz */
+ hdmi.code = 32;
+ break;
+ case 528: /* 1080p 50Hz */
+ hdmi.code = 31;
+ break;
+ case 88: /* 1080p 60Hz */
+ default:
+ hdmi.code = 16;
+ }
+ }
+ hdmi.ti.pixelPerLine = tp->x_res;
+ hdmi.ti.linePerPanel = tp->y_res;
+ hdmi.ti.horizontalBackPorch = tp->hbp;
+ hdmi.ti.horizontalFrontPorch = tp->hfp;
+ hdmi.ti.horizontalSyncPulse = tp->hsw;
+ hdmi.ti.verticalBackPorch = tp->vbp;
+ hdmi.ti.verticalFrontPorch = tp->vfp;
+ hdmi.ti.verticalSyncPulse = tp->vsw;
+
+err:
+ return r;
+}
+
+u16 current_descriptor_addrs;
+
+void get_horz_vert_timing_info(u8 *edid)
+{
+ /*HORIZONTAL FRONT PORCH */
+ edid_timings.hfp = edid[current_descriptor_addrs + 8];
+ /*HORIZONTAL SYNC WIDTH */
+ edid_timings.hsw = edid[current_descriptor_addrs + 9];
+ /*HORIZONTAL BACK PORCH */
+ edid_timings.hbp = (((edid[current_descriptor_addrs + 4]
+ & 0x0F) << 8) |
+ edid[current_descriptor_addrs + 3]) -
+ (edid_timings.hfp + edid_timings.hsw);
+ /*VERTICAL FRONT PORCH */
+ edid_timings.vfp = ((edid[current_descriptor_addrs + 10] &
+ 0xF0) >> 4);
+ /*VERTICAL SYNC WIDTH */
+ edid_timings.vsw = (edid[current_descriptor_addrs + 10] &
+ 0x0F);
+ /*VERTICAL BACK PORCH */
+ edid_timings.vbp = (((edid[current_descriptor_addrs + 7] &
+ 0x0F) << 8) |
+ edid[current_descriptor_addrs + 6]) -
+ (edid_timings.vfp + edid_timings.vsw);
+
+ printk(KERN_INFO "hfp = %d\n"
+ "hsw = %d\n"
+ "hbp = %d\n"
+ "vfp = %d\n"
+ "vsw = %d\n"
+ "vbp = %d\n",
+ edid_timings.hfp,
+ edid_timings.hsw,
+ edid_timings.hbp,
+ edid_timings.vfp,
+ edid_timings.vsw,
+ edid_timings.vbp);
+
+}
+
+/*------------------------------------------------------------------------------
+ | Function : get_edid_timing_data
+ +------------------------------------------------------------------------------
+ | Description : Searches the EDID detailed timing descriptors for the
+ | current default resolution and extracts its timing data
+ |
+ | Parameters : EDID buffer plus pointers receiving the pixel clock,
+ | horizontal resolution and vertical resolution
+ |
+ | Returns : true if a matching descriptor was found, false otherwise
+ +----------------------------------------------------------------------------*/
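+/*
+ * Detailed timing descriptor fields used below (byte offsets within one
+ * 18-byte descriptor starting at "base"):
+ *   pixel clock = (edid[base + 1] << 8) | edid[base + 0]   in 10 kHz units
+ *   h_active    = ((edid[base + 4] & 0xF0) << 4) | edid[base + 2]
+ *   v_active    = ((edid[base + 7] & 0xF0) << 4) | edid[base + 5]
+ */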
+static int get_edid_timing_data(u8 *edid, u16 *pixel_clk, u16 *horizontal_res,
+ u16 *vertical_res)
+{
+ u8 offset, effective_addrs;
+ u8 count;
+ u8 flag = false;
+#ifdef EDID_DEBUG
+ u16 i; /* only used by the debug dumps below */
+#endif
+ /* Search block 0; there are 4 DTDs arranged in priority order */
+ for (count = 0; count < EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR; count++) {
+ current_descriptor_addrs =
+ EDID_DESCRIPTOR_BLOCK0_ADDRESS +
+ count * EDID_TIMING_DESCRIPTOR_SIZE;
+ *horizontal_res =
+ (((edid[EDID_DESCRIPTOR_BLOCK0_ADDRESS + 4 +
+ count * EDID_TIMING_DESCRIPTOR_SIZE] & 0xF0) << 4) |
+ edid[EDID_DESCRIPTOR_BLOCK0_ADDRESS + 2 +
+ count * EDID_TIMING_DESCRIPTOR_SIZE]);
+ *vertical_res =
+ (((edid[EDID_DESCRIPTOR_BLOCK0_ADDRESS + 7 +
+ count * EDID_TIMING_DESCRIPTOR_SIZE] & 0xF0) << 4) |
+ edid[EDID_DESCRIPTOR_BLOCK0_ADDRESS + 5 +
+ count * EDID_TIMING_DESCRIPTOR_SIZE]);
+ DSSDBG("***Block-0-Timing-descriptor[%d]***\n", count);
+#ifdef EDID_DEBUG
+ for (i = current_descriptor_addrs;
+ i <
+ (current_descriptor_addrs+EDID_TIMING_DESCRIPTOR_SIZE);
+ i++)
+ DSSDBG("%d ==> %x\n", i, edid[i]);
+
+ DSSDBG("E-EDID Buffer Index = 0x%x\n"
+ "horizontal_res = %d\n"
+ "vertical_res = %d\n",
+ current_descriptor_addrs,
+ *horizontal_res,
+ *vertical_res
+ );
+#endif
+ if (*horizontal_res == edid_timings.x_res &&
+ *vertical_res == edid_timings.y_res) {
+ DSSDBG("Found EDID Data for %d x %dp\n",
+ *horizontal_res, *vertical_res);
+ flag = true;
+ break;
+ }
+ }
+
+ /*check for the 1080p in extended block CEA DTDs*/
+ if (flag != true) {
+ offset = edid[EDID_DESCRIPTOR_BLOCK1_ADDRESS + 2];
+ if (offset != 0) {
+ effective_addrs = EDID_DESCRIPTOR_BLOCK1_ADDRESS
+ + offset;
+ /*to determine the number of descriptor blocks */
+ for (count = 0;
+ count < EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR;
+ count++) {
+ current_descriptor_addrs = effective_addrs +
+ count * EDID_TIMING_DESCRIPTOR_SIZE;
+ *horizontal_res =
+ (((edid[effective_addrs + 4 +
+ count*EDID_TIMING_DESCRIPTOR_SIZE] &
+ 0xF0) << 4) |
+ edid[effective_addrs + 2 +
+ count * EDID_TIMING_DESCRIPTOR_SIZE]);
+ *vertical_res =
+ (((edid[effective_addrs + 7 +
+ count*EDID_TIMING_DESCRIPTOR_SIZE] &
+ 0xF0) << 4) |
+ edid[effective_addrs + 5 +
+ count * EDID_TIMING_DESCRIPTOR_SIZE]);
+
+ DSSDBG("Block1-Timing-descriptor[%d]\n", count);
+#ifdef EDID_DEBUG
+ for (i = current_descriptor_addrs;
+ i < (current_descriptor_addrs+
+ EDID_TIMING_DESCRIPTOR_SIZE); i++)
+ DSSDBG("%x ==> %x\n",
+ i, edid[i]);
+
+ DSSDBG("current_descriptor = 0x%x\n"
+ "horizontal_res = %d\n"
+ "vertical_res = %d\n",
+ current_descriptor_addrs,
+ *horizontal_res, *vertical_res);
+#endif
+ if (*horizontal_res == edid_timings.x_res &&
+ *vertical_res == edid_timings.y_res) {
+ DSSDBG("Found EDID Data for "
+ "%d x %dp\n",
+ *horizontal_res,
+ *vertical_res
+ );
+ flag = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (flag == true) {
+ *pixel_clk = ((edid[current_descriptor_addrs + 1] << 8) |
+ edid[current_descriptor_addrs]);
+
+ edid_timings.x_res = *horizontal_res;
+ edid_timings.y_res = *vertical_res;
+ edid_timings.pixel_clock = *pixel_clk*10;
+ printk(KERN_INFO "EDID TIMING DATA FOUND\n"
+ "EDID DTD block address = 0x%x\n"
+ "pixel_clk = %d\n"
+ "horizontal res = %d\n"
+ "vertical res = %d\n",
+ current_descriptor_addrs,
+ edid_timings.pixel_clock,
+ edid_timings.x_res,
+ edid_timings.y_res
+ );
+
+ get_horz_vert_timing_info(edid);
+ } else {
+
+ printk(KERN_WARNING
+ "Supported EDID timing data NOT found\n"
+ "setting default timing values\n"
+ "pixel_clk = %d\n"
+ "horizontal res = %d\n"
+ "vertical res = %d\n",
+ edid_timings.pixel_clock,
+ edid_timings.x_res,
+ edid_timings.y_res
+ );
+
+ *pixel_clk = edid_timings.pixel_clock;
+ *horizontal_res = edid_timings.x_res;
+ *vertical_res = edid_timings.y_res;
+ }
+
+ return flag;
+}
+
+void hdmi_dump_regs(struct seq_file *s)
+{
+ DSSDBG("0x4a100060 x%x\n", omap_readl(0x4A100060));
+ DSSDBG("0x4A100088 x%x\n", omap_readl(0x4A100088));
+ DSSDBG("0x48055134 x%x\n", omap_readl(0x48055134));
+ DSSDBG("0x48055194 x%x\n", omap_readl(0x48055194));
+}
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
new file mode 100755
index 000000000000..318274050a7b
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi.h
@@ -0,0 +1,223 @@
+/*
+ * drivers/media/video/omap2/dss/hdmi.h
+ *
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * hdmi driver
+ */
+#ifndef _OMAP4_HDMI_H_
+#define _OMAP4_HDMI_H_
+
+#define HDMI_EDID_DETAILED_TIMING_OFFSET 0x36 /*EDID Detailed Timing
+ Info 0 begin offset*/
+#define HDMI_EDID_PIX_CLK_OFFSET 0
+#define HDMI_EDID_H_ACTIVE_OFFSET 2
+#define HDMI_EDID_H_BLANKING_OFFSET 3
+#define HDMI_EDID_V_ACTIVE_OFFSET 5
+#define HDMI_EDID_V_BLANKING_OFFSET 6
+#define HDMI_EDID_H_SYNC_OFFSET 8
+#define HDMI_EDID_H_SYNC_PW_OFFSET 9
+#define HDMI_EDID_V_SYNC_OFFSET 10
+#define HDMI_EDID_V_SYNC_PW_OFFSET 10
+#define HDMI_EDID_H_IMAGE_SIZE_OFFSET 12
+#define HDMI_EDID_V_IMAGE_SIZE_OFFSET 13
+#define HDMI_EDID_H_BORDER_OFFSET 15
+#define HDMI_EDID_V_BORDER_OFFSET 16
+#define HDMI_EDID_FLAGS_OFFSET 17
+
+
+#define EDID_TIMING_DESCRIPTOR_SIZE 0x12
+#define EDID_DESCRIPTOR_BLOCK0_ADDRESS 0x36
+#define EDID_DESCRIPTOR_BLOCK1_ADDRESS 0x80
+#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR 4
+#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR 4
+
+/* HDMI Connected States */
+#define HDMI_STATE_NOMONITOR 0 /* No HDMI monitor connected*/
+#define HDMI_STATE_CONNECTED 1 /* HDMI monitor connected but powered off*/
+#define HDMI_STATE_ON 2 /* HDMI monitor connected and powered on*/
+
+
+/* HDMI EDID Length */
+#define HDMI_EDID_MAX_LENGTH 256
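+/* 256 bytes = base EDID block (128) plus one CEA-861 extension block (128) */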
+
+/* HDMI EDID DTDs */
+#define HDMI_EDID_MAX_DTDS 4
+
+/* HDMI EDID DTD Tags */
+#define HDMI_EDID_DTD_TAG_MONITOR_NAME 0xFC
+#define HDMI_EDID_DTD_TAG_MONITOR_SERIALNUM 0xFF
+#define HDMI_EDID_DTD_TAG_MONITOR_LIMITS 0xFD
+
+
+/* HDMI EDID Extension Data Block Tags */
+#define HDMI_EDID_EX_DATABLOCK_TAG_MASK 0xE0
+#define HDMI_EDID_EX_DATABLOCK_LEN_MASK 0x1F
+
+#define HDMI_EDID_EX_DATABLOCK_AUDIO 0x20
+#define HDMI_EDID_EX_DATABLOCK_VIDEO 0x40
+#define HDMI_EDID_EX_DATABLOCK_VENDOR 0x60
+#define HDMI_EDID_EX_DATABLOCK_SPEAKERS 0x80
+
+/* HDMI EDID Extension Data Block Values: Video */
+#define HDMI_EDID_EX_VIDEO_NATIVE 0x80
+#define HDMI_EDID_EX_VIDEO_MASK 0x7F
+#define HDMI_EDID_EX_VIDEO_MAX 35
+
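+/* CEA-861 Video Identification Codes; hdmi.code in hdmi.c uses the same
+ * numbering (e.g. 4 = 720p60, 16 = 1080p60, 19 = 720p50) */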
+#define HDMI_EDID_EX_VIDEO_640x480p_60Hz_4_3 1
+#define HDMI_EDID_EX_VIDEO_720x480p_60Hz_4_3 2
+#define HDMI_EDID_EX_VIDEO_720x480p_60Hz_16_9 3
+#define HDMI_EDID_EX_VIDEO_1280x720p_60Hz_16_9 4
+#define HDMI_EDID_EX_VIDEO_1920x1080i_60Hz_16_9 5
+#define HDMI_EDID_EX_VIDEO_720x480i_60Hz_4_3 6
+#define HDMI_EDID_EX_VIDEO_720x480i_60Hz_16_9 7
+#define HDMI_EDID_EX_VIDEO_720x240p_60Hz_4_3 8
+#define HDMI_EDID_EX_VIDEO_720x240p_60Hz_16_9 9
+#define HDMI_EDID_EX_VIDEO_2880x480i_60Hz_4_3 10
+#define HDMI_EDID_EX_VIDEO_2880x480i_60Hz_16_9 11
+#define HDMI_EDID_EX_VIDEO_2880x480p_60Hz_4_3 12
+#define HDMI_EDID_EX_VIDEO_2880x480p_60Hz_16_9 13
+#define HDMI_EDID_EX_VIDEO_1440x480p_60Hz_4_3 14
+#define HDMI_EDID_EX_VIDEO_1440x480p_60Hz_16_9 15
+#define HDMI_EDID_EX_VIDEO_1920x1080p_60Hz_16_9 16
+#define HDMI_EDID_EX_VIDEO_720x576p_50Hz_4_3 17
+#define HDMI_EDID_EX_VIDEO_720x576p_50Hz_16_9 18
+#define HDMI_EDID_EX_VIDEO_1280x720p_50Hz_16_9 19
+#define HDMI_EDID_EX_VIDEO_1920x1080i_50Hz_16_9 20
+#define HDMI_EDID_EX_VIDEO_720x576i_50Hz_4_3 21
+#define HDMI_EDID_EX_VIDEO_720x576i_50Hz_16_9 22
+#define HDMI_EDID_EX_VIDEO_720x288p_50Hz_4_3 23
+#define HDMI_EDID_EX_VIDEO_720x288p_50Hz_16_9 24
+#define HDMI_EDID_EX_VIDEO_2880x576i_50Hz_4_3 25
+#define HDMI_EDID_EX_VIDEO_2880x576i_50Hz_16_9 26
+#define HDMI_EDID_EX_VIDEO_2880x288p_50Hz_4_3 27
+#define HDMI_EDID_EX_VIDEO_2880x288p_50Hz_16_9 28
+#define HDMI_EDID_EX_VIDEO_1440x576p_50Hz_4_3 29
+#define HDMI_EDID_EX_VIDEO_1440x576p_50Hz_16_9 30
+#define HDMI_EDID_EX_VIDEO_1920x1080p_50Hz_16_9 31
+#define HDMI_EDID_EX_VIDEO_1920x1080p_24Hz_16_9 32
+#define HDMI_EDID_EX_VIDEO_1920x1080p_25Hz_16_9 33
+#define HDMI_EDID_EX_VIDEO_1920x1080p_30Hz_16_9 34
+
+/*--------------------------------------------------------------------- */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Video Descriptor Block */
+typedef struct {
+ u8 pixel_clock[2]; /* 54-55 */
+ u8 horiz_active; /* 56 */
+ u8 horiz_blanking; /* 57 */
+ u8 horiz_high; /* 58 */
+ u8 vert_active; /* 59 */
+ u8 vert_blanking; /* 60 */
+ u8 vert_high; /* 61 */
+ u8 horiz_sync_offset; /* 62 */
+ u8 horiz_sync_pulse; /* 63 */
+ u8 vert_sync_pulse; /* 64 */
+ u8 sync_pulse_high; /* 65 */
+ u8 horiz_image_size; /* 66 */
+ u8 vert_image_size; /* 67 */
+ u8 image_size_high; /* 68 */
+ u8 horiz_border; /* 69 */
+ u8 vert_border; /* 70 */
+ u8 misc_settings; /* 71 */
+}
+HDMI_EDID_DTD_VIDEO;
+
+
+/* Monitor Limits Descriptor Block */
+typedef struct {
+ u8 pixel_clock[2]; /* 54-55*/
+ u8 _reserved1; /* 56 */
+ u8 block_type; /* 57 */
+ u8 _reserved2; /* 58 */
+ u8 min_vert_freq; /* 59 */
+ u8 max_vert_freq; /* 60 */
+ u8 min_horiz_freq; /* 61 */
+ u8 max_horiz_freq; /* 62 */
+ u8 pixel_clock_mhz; /* 63 */
+
+ u8 GTF[2]; /* 64 -65 */
+ u8 start_horiz_freq; /* 66 */
+ u8 C; /* 67 */
+ u8 M[2]; /* 68-69 */
+ u8 K; /* 70 */
+ u8 J; /* 71 */
+}
+HDMI_EDID_DTD_MONITOR;
+
+
+/* Text Descriptor Block */
+typedef struct {
+ u8 pixel_clock[2]; /* 54-55 */
+ u8 _reserved1; /* 56 */
+ u8 block_type; /* 57 */
+ u8 _reserved2; /* 58 */
+
+ u8 text[13]; /* 59-71 */
+}
+HDMI_EDID_DTD_TEXT;
+
+
+/* DTD Union */
+typedef union {
+ HDMI_EDID_DTD_VIDEO video;
+ HDMI_EDID_DTD_TEXT monitor_name;
+ HDMI_EDID_DTD_TEXT monitor_serial_number;
+ HDMI_EDID_DTD_MONITOR monitor_limits;
+}
+HDMI_EDID_DTD;
+
+
+/* EDID struct */
+typedef struct {
+ u8 header[8]; /* 00-07 */
+ u8 manufacturerID[2]; /* 08-09 */
+ u8 product_id[2]; /* 10-11 */
+ u8 serial_number[4]; /* 12-15 */
+ u8 week_manufactured; /* 16 */
+ u8 year_manufactured; /* 17 */
+ u8 edid_version; /* 18 */
+ u8 edid_revision; /* 19 */
+
+ u8 video_in_definition; /* 20 */
+ u8 max_horiz_image_size; /* 21 */
+ u8 max_vert_image_size; /* 22 */
+ u8 display_gamma; /* 23 */
+ u8 power_features; /* 24 */
+ u8 chroma_info[10]; /* 25-34 */
+ u8 timing_1; /* 35 */
+ u8 timing_2; /* 36 */
+ u8 timing_3; /* 37 */
+ u8 std_timings[16]; /* 38-53 */
+
+ HDMI_EDID_DTD DTD[4]; /* 72-125 */
+
+ u8 extension_edid; /* 126 */
+ u8 checksum; /* 127 */
+
+ u8 extension_tag; /* 00 (extensions follow EDID) */
+ u8 extention_rev; /* 01 */
+ u8 offset_dtd; /* 02 */
+ u8 num_dtd; /* 03 */
+
+ u8 data_block[123]; /* 04 - 126 */
+ u8 extension_checksum; /* 127 */
+ }
+HDMI_EDID;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 27d9c465c851..cad711f156b7 100644..100755
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -33,6 +33,13 @@
#include "dss.h"
+
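+/* OMAP4 adds a third overlay manager (LCD2) next to the LCD and TV managers */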
+#ifdef CONFIG_ARCH_OMAP4
+#define MAX_DSS_MANAGERS 3
+#else
+#define MAX_DSS_MANAGERS 2
+#endif
+
static int num_managers;
static struct list_head manager_list;
@@ -213,7 +220,7 @@ static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.trans_enabled);
+ return snprintf(buf, PAGE_SIZE, "%s\n", (mgr->info.trans_enabled)? "true" : "false");
}
static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
@@ -244,7 +251,7 @@ static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
static ssize_t manager_alpha_blending_enabled_show(
struct omap_overlay_manager *mgr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.alpha_enabled);
+ return snprintf(buf, PAGE_SIZE, "%s\n", (mgr->info.alpha_enabled)? "true" : "false");
}
static ssize_t manager_alpha_blending_enabled_store(
@@ -414,6 +421,11 @@ struct overlay_cache_data {
u32 fifo_high;
bool manual_update;
+
+#ifdef CONFIG_ARCH_OMAP4
+ u32 p_uv_addr; /* relevant for NV12 format only */
+ enum omap_overlay_zorder zorder;
+#endif
};
struct manager_cache_data {
@@ -443,8 +455,14 @@ struct manager_cache_data {
static struct {
spinlock_t lock;
+#ifdef CONFIG_ARCH_OMAP4
+ struct overlay_cache_data overlay_cache[4];
+ struct manager_cache_data manager_cache[3];
+#else
struct overlay_cache_data overlay_cache[3];
struct manager_cache_data manager_cache[2];
+#endif
+
bool irq_enabled;
} dss_cache;
@@ -512,22 +530,33 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
if (!mgr->device)
return 0;
+ channel = mgr->device->channel;
- if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC
+ || mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
- channel = OMAP_DSS_CHANNEL_DIGIT;
} else {
if (mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
enum omap_dss_update_mode mode;
mode = mgr->device->get_update_mode(mgr->device);
if (mode != OMAP_DSS_UPDATE_AUTO)
return 0;
-
+#ifdef CONFIG_ARCH_OMAP4
+ irq = (channel == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_FRAMEDONE
+ : DISPC_IRQ_FRAMEDONE2;
+#else
irq = DISPC_IRQ_FRAMEDONE;
+#endif
} else {
+#ifdef CONFIG_ARCH_OMAP4
+ irq = (channel == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_VSYNC
+ : DISPC_IRQ_VSYNC2;
+#else
irq = DISPC_IRQ_VSYNC;
+#endif
}
- channel = OMAP_DSS_CHANNEL_LCD;
}
mc = &dss_cache.manager_cache[mgr->id];
@@ -585,22 +614,33 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
return 0;
dssdev = ovl->manager->device;
+ channel = dssdev->channel;
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
+ || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
- channel = OMAP_DSS_CHANNEL_DIGIT;
} else {
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
enum omap_dss_update_mode mode;
mode = dssdev->get_update_mode(dssdev);
if (mode != OMAP_DSS_UPDATE_AUTO)
return 0;
-
+#ifdef CONFIG_ARCH_OMAP4
+ irq = (channel == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_FRAMEDONE
+ : DISPC_IRQ_FRAMEDONE2;
+#else
irq = DISPC_IRQ_FRAMEDONE;
+#endif
} else {
+#ifdef CONFIG_ARCH_OMAP4
+ irq = (channel == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_VSYNC
+ : DISPC_IRQ_VSYNC2;
+#else
irq = DISPC_IRQ_VSYNC;
+#endif
}
- channel = OMAP_DSS_CHANNEL_LCD;
}
oc = &dss_cache.overlay_cache[ovl->id];
@@ -722,7 +762,7 @@ static int configure_overlay(enum omap_plane plane)
outw = c->out_width == 0 ? c->width : c->out_width;
outh = c->out_height == 0 ? c->height : c->out_height;
paddr = c->paddr;
-
+/* TODO: OMAP4: check if changes are needed here for NV12 ?*/
if (c->manual_update && mc->do_manual_update) {
unsigned bpp;
/* If the overlay is outside the update region, disable it */
@@ -801,7 +841,11 @@ static int configure_overlay(enum omap_plane plane)
c->rotation_type,
c->rotation,
c->mirror,
- c->global_alpha);
+ c->global_alpha, c->channel
+#ifdef CONFIG_ARCH_OMAP4
+ , c->p_uv_addr
+#endif
+ );
if (r) {
/* this shouldn't happen */
@@ -811,10 +855,13 @@ static int configure_overlay(enum omap_plane plane)
}
dispc_enable_replication(plane, c->replication);
-
dispc_set_burst_size(plane, c->burst_size);
dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high);
+#ifdef CONFIG_ARCH_OMAP4
+ dispc_set_zorder(plane, c->zorder);
+ dispc_enable_zorder(plane, 1);
+#endif
dispc_enable_plane(plane, 1);
return 0;
@@ -827,7 +874,7 @@ static void configure_manager(enum omap_channel channel)
DSSDBGF("%d", channel);
c = &dss_cache.manager_cache[channel];
-
+ dispc_set_default_color(channel, c->default_color);
dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
dispc_enable_trans_key(channel, c->trans_enabled);
dispc_enable_alpha_blending(channel, c->alpha_enabled);
@@ -845,21 +892,25 @@ static int configure_dispc(void)
const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
int i;
int r;
- bool mgr_busy[2];
- bool mgr_go[2];
+ bool mgr_busy[MAX_DSS_MANAGERS];
+ bool mgr_go[MAX_DSS_MANAGERS];
bool busy;
r = 0;
busy = false;
- mgr_busy[0] = dispc_go_busy(0);
- mgr_busy[1] = dispc_go_busy(1);
- mgr_go[0] = false;
- mgr_go[1] = false;
+ for (i = 0; i < num_mgrs; i++) {
+ mgr_busy[i] = dispc_go_busy(i);
+ mgr_go[i] = false;
+ }
/* Commit overlay settings */
for (i = 0; i < num_ovls; ++i) {
oc = &dss_cache.overlay_cache[i];
+ if (oc->channel >= num_mgrs) {
+ DSSERR("overlay cache channel out of bounds: %d\n", oc->channel);
+ continue;
+ }
mc = &dss_cache.manager_cache[oc->channel];
if (!oc->dirty)
@@ -1064,7 +1115,7 @@ void dss_start_update(struct omap_dss_device *dssdev)
mc->shadow_dirty = false;
}
- dispc_enable_lcd_out(1);
+ dispc_enable_lcd_out(dssdev->channel, 1);
}
static void dss_apply_irq_handler(void *data, u32 mask)
@@ -1074,10 +1125,10 @@ static void dss_apply_irq_handler(void *data, u32 mask)
const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
int i, r;
- bool mgr_busy[2];
+ bool mgr_busy[MAX_DSS_MANAGERS];
- mgr_busy[0] = dispc_go_busy(0);
- mgr_busy[1] = dispc_go_busy(1);
+ for (i = 0; i < num_mgrs; i++)
+ mgr_busy[i] = dispc_go_busy(i);
spin_lock(&dss_cache.lock);
@@ -1100,17 +1151,22 @@ static void dss_apply_irq_handler(void *data, u32 mask)
/* re-read busy flags */
mgr_busy[0] = dispc_go_busy(0);
mgr_busy[1] = dispc_go_busy(1);
+#ifdef CONFIG_ARCH_OMAP4
+ mgr_busy[2] = dispc_go_busy(2);
+#endif
/* keep running as long as there are busy managers, so that
* we can collect overlay-applied information */
- for (i = 0; i < num_mgrs; ++i) {
+ for (i = 0; i < num_mgrs; ++i)
if (mgr_busy[i])
goto end;
- }
omap_dispc_unregister_isr(dss_apply_irq_handler, NULL,
DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
- DISPC_IRQ_EVSYNC_EVEN);
+ DISPC_IRQ_EVSYNC_EVEN
+#ifdef CONFIG_ARCH_OMAP4
+ | DISPC_IRQ_VSYNC2
+#endif
+ );
+
dss_cache.irq_enabled = false;
end:
@@ -1138,6 +1194,9 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
ovl = omap_dss_get_overlay(i);
+ if (ovl == NULL)
+ break;
+
if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
continue;
@@ -1171,6 +1230,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
oc->dirty = true;
oc->paddr = ovl->info.paddr;
+#ifdef CONFIG_ARCH_OMAP4
+ oc->p_uv_addr = ovl->info.p_uv_addr;
+ oc->zorder = ovl->info.zorder;
+#endif
oc->vaddr = ovl->info.vaddr;
oc->screen_width = ovl->info.screen_width;
oc->width = ovl->info.width;
@@ -1191,6 +1254,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
oc->ilace = dssdev->type == OMAP_DISPLAY_TYPE_VENC;
oc->channel = ovl->manager->id;
+ /* TODO: to change with dssdev->channel? */
oc->enabled = true;
@@ -1281,6 +1345,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
case OMAP_DISPLAY_TYPE_DBI:
case OMAP_DISPLAY_TYPE_SDI:
case OMAP_DISPLAY_TYPE_VENC:
+ case OMAP_DISPLAY_TYPE_HDMI:
default_get_overlay_fifo_thresholds(ovl->id, size,
&oc->burst_size, &oc->fifo_low,
&oc->fifo_high);
@@ -1302,7 +1367,12 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
if (!dss_cache.irq_enabled) {
r = omap_dispc_register_isr(dss_apply_irq_handler, NULL,
DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
- DISPC_IRQ_EVSYNC_EVEN);
+ DISPC_IRQ_EVSYNC_EVEN
+#ifdef CONFIG_ARCH_OMAP4
+ | DISPC_IRQ_VSYNC2
+#endif
+ );
+
dss_cache.irq_enabled = true;
}
configure_dispc();
@@ -1367,7 +1437,7 @@ int dss_init_overlay_managers(struct platform_device *pdev)
num_managers = 0;
- for (i = 0; i < 2; ++i) {
+ for (i = 0; i < MAX_DSS_MANAGERS; ++i) {
struct omap_overlay_manager *mgr;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
@@ -1384,8 +1454,18 @@ int dss_init_overlay_managers(struct platform_device *pdev)
case 1:
mgr->name = "tv";
mgr->id = OMAP_DSS_CHANNEL_DIGIT;
- mgr->supported_displays = OMAP_DISPLAY_TYPE_VENC;
+ mgr->supported_displays =
+ OMAP_DISPLAY_TYPE_VENC | OMAP_DISPLAY_TYPE_HDMI;
+ break;
+#ifdef CONFIG_ARCH_OMAP4
+ case 2:
+ mgr->name = "2lcd";
+ mgr->id = OMAP_DSS_CHANNEL_LCD2;
+ mgr->supported_displays =
+ OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_SDI |
+ OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DSI;
break;
+#endif
}
mgr->set_device = &omap_dss_set_device;
@@ -1485,3 +1565,11 @@ struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
}
EXPORT_SYMBOL(omap_dss_get_overlay_manager);
+#ifdef L4_EXAMPLE
+static int ovl_mgr_apply_l4(struct omap_overlay_manager *mgr)
+{
+ DSSDBG("omap_dss_mgr_apply_l4(%s)\n", mgr->name);
+ return 0;
+}
+#endif
+
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index b7f9a7339842..e076d5760b51 100644..100755
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -35,6 +35,12 @@
#include "dss.h"
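+/* OMAP4 adds a fourth overlay (vid3) on top of gfx, vid1 and vid2 */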
+#ifdef CONFIG_ARCH_OMAP4
+#define MAX_DSS_OVERLAYS 4
+#else
+#define MAX_DSS_OVERLAYS 3
+#endif
+
static int num_overlays;
static struct list_head overlay_list;
@@ -63,7 +69,8 @@ static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
if (len > 0) {
for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
mgr = omap_dss_get_overlay_manager(i);
-
+ if (mgr == NULL)
+ return -EINVAL;
if (strncmp(buf, mgr->name, len) == 0)
break;
@@ -193,7 +200,7 @@ static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.enabled);
+ return snprintf(buf, PAGE_SIZE, "%s\n", (ovl->info.enabled)? "true" : "false");
}
static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
@@ -233,12 +240,14 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
ovl->get_overlay_info(ovl, &info);
- /* Video1 plane does not support global alpha
+#ifndef CONFIG_ARCH_OMAP4
+ /* In OMAP2/3: Video1 plane does not support global alpha
* to always make it 255 completely opaque
*/
if (ovl->id == OMAP_DSS_VIDEO1)
info.global_alpha = 255;
else
+#endif
info.global_alpha = simple_strtoul(buf, NULL, 10);
r = ovl->set_overlay_info(ovl, &info);
@@ -254,6 +263,38 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
return size;
}
+#ifdef CONFIG_ARCH_OMAP4
+static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ ovl->info.zorder);
+}
+
+static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
+ const char *buf, size_t size)
+{
+ int r;
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ info.zorder = simple_strtoul(buf, NULL, 10);
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ return r;
+
+ if (ovl->manager) {
+ r = ovl->manager->apply(ovl->manager);
+ if (r)
+ return r;
+ }
+
+ return size;
+}
+
+#endif
+
struct overlay_attribute {
struct attribute attr;
ssize_t (*show)(struct omap_overlay *, char *);
@@ -278,6 +319,11 @@ static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
overlay_global_alpha_show, overlay_global_alpha_store);
+#ifdef CONFIG_ARCH_OMAP4
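+/* exposed as a per-overlay "zorder" sysfs attribute, alongside the existing
+ * enabled and global_alpha attributes */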
+static OVERLAY_ATTR(zorder, S_IRUGO|S_IWUSR,
+ overlay_zorder_show, overlay_zorder_store);
+#endif
+
static struct attribute *overlay_sysfs_attrs[] = {
&overlay_attr_name.attr,
&overlay_attr_manager.attr,
@@ -287,6 +333,9 @@ static struct attribute *overlay_sysfs_attrs[] = {
&overlay_attr_output_size.attr,
&overlay_attr_enabled.attr,
&overlay_attr_global_alpha.attr,
+#ifdef CONFIG_ARCH_OMAP4
+ &overlay_attr_zorder.attr,
+#endif
NULL
};
@@ -391,6 +440,15 @@ int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
return -EINVAL;
}
+#ifdef CONFIG_ARCH_OMAP4
+ if ((info->zorder < OMAP_DSS_OVL_ZORDER_0) ||
+ (info->zorder > OMAP_DSS_OVL_ZORDER_3)) {
+ DSSERR("overlay doesn't support zorder %d\n", info->zorder);
+ return -EINVAL;
+ }
+
+#endif
+
return 0;
}
@@ -477,7 +535,6 @@ static int omap_dss_unset_manager(struct omap_overlay *ovl)
r = ovl->wait_for_go(ovl);
if (r)
return r;
-
ovl->manager = NULL;
return 0;
@@ -509,11 +566,11 @@ static void omap_dss_add_overlay(struct omap_overlay *overlay)
list_add_tail(&overlay->list, &overlay_list);
}
-static struct omap_overlay *dispc_overlays[3];
+static struct omap_overlay *dispc_overlays[MAX_DSS_OVERLAYS];
void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr)
{
- mgr->num_overlays = 3;
+ mgr->num_overlays = MAX_DSS_OVERLAYS;
mgr->overlays = dispc_overlays;
}
@@ -534,7 +591,7 @@ void dss_init_overlays(struct platform_device *pdev)
num_overlays = 0;
- for (i = 0; i < 3; ++i) {
+ for (i = 0; i < MAX_DSS_OVERLAYS; ++i) {
struct omap_overlay *ovl;
ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
@@ -544,32 +601,58 @@ void dss_init_overlays(struct platform_device *pdev)
case 0:
ovl->name = "gfx";
ovl->id = OMAP_DSS_GFX;
- ovl->supported_modes = cpu_is_omap34xx() ?
+ ovl->supported_modes = (cpu_is_omap44xx() ||
+ cpu_is_omap34xx()) ?
OMAP_DSS_COLOR_GFX_OMAP3 :
OMAP_DSS_COLOR_GFX_OMAP2;
ovl->caps = OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
+#ifdef CONFIG_ARCH_OMAP4
+ ovl->info.zorder = OMAP_DSS_OVL_ZORDER_0;
+#endif
break;
case 1:
ovl->name = "vid1";
ovl->id = OMAP_DSS_VIDEO1;
- ovl->supported_modes = cpu_is_omap34xx() ?
+ ovl->supported_modes = (cpu_is_omap44xx() ||
+ cpu_is_omap34xx()) ?
OMAP_DSS_COLOR_VID1_OMAP3 :
OMAP_DSS_COLOR_VID_OMAP2;
ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
+#ifdef CONFIG_ARCH_OMAP4
+ ovl->info.zorder = OMAP_DSS_OVL_ZORDER_3;
+#endif
break;
case 2:
ovl->name = "vid2";
ovl->id = OMAP_DSS_VIDEO2;
- ovl->supported_modes = cpu_is_omap34xx() ?
+ ovl->supported_modes = (cpu_is_omap44xx() ||
+ cpu_is_omap34xx()) ?
OMAP_DSS_COLOR_VID2_OMAP3 :
OMAP_DSS_COLOR_VID_OMAP2;
ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
+#ifdef CONFIG_ARCH_OMAP4
+ ovl->info.zorder = OMAP_DSS_OVL_ZORDER_2;
+#endif
break;
+#ifdef CONFIG_ARCH_OMAP4
+ case 3:
+ ovl->name = "vid3";
+ ovl->id = OMAP_DSS_VIDEO3;
+ ovl->supported_modes = OMAP_DSS_COLOR_VID3_OMAP3;
+ ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
+ OMAP_DSS_OVL_CAP_DISPC;
+ ovl->info.global_alpha = 255;
+#ifdef CONFIG_ARCH_OMAP4
+ ovl->info.zorder = OMAP_DSS_OVL_ZORDER_1;
+#endif
+
+ break;
+#endif
}
ovl->set_manager = &omap_dss_set_manager;
@@ -626,31 +709,55 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
int i;
struct omap_overlay_manager *lcd_mgr;
struct omap_overlay_manager *tv_mgr;
+#ifdef CONFIG_ARCH_OMAP4
+ struct omap_overlay_manager *lcd2_mgr;
+#endif
struct omap_overlay_manager *mgr = NULL;
lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD);
tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV);
- if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
- if (!lcd_mgr->device || force) {
- if (lcd_mgr->device)
- lcd_mgr->unset_device(lcd_mgr);
- lcd_mgr->set_device(lcd_mgr, dssdev);
- mgr = lcd_mgr;
+#ifdef CONFIG_ARCH_OMAP4
+ lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD2);
+
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
+ if (lcd2_mgr) {
+ if (!lcd2_mgr->device || force || sysfs_streq(dssdev->name, "2lcd")) {
+ if (lcd2_mgr->device)
+ lcd2_mgr->unset_device(lcd2_mgr);
+ lcd2_mgr->set_device(lcd2_mgr, dssdev);
+ mgr = lcd2_mgr;
+ }
+ }
+ } else
+#endif
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_VENC
+ && dssdev->type != OMAP_DISPLAY_TYPE_HDMI) {
+ if (lcd_mgr) {
+ if (!lcd_mgr->device || force) {
+ if (lcd_mgr->device)
+ lcd_mgr->unset_device(lcd_mgr);
+ lcd_mgr->set_device(lcd_mgr, dssdev);
+ mgr = lcd_mgr;
+ }
}
}
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
- if (!tv_mgr->device || force) {
- if (tv_mgr->device)
- tv_mgr->unset_device(tv_mgr);
- tv_mgr->set_device(tv_mgr, dssdev);
- mgr = tv_mgr;
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
+ || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
+ if (tv_mgr) {
+ if (!tv_mgr->device || force) {
+ if (tv_mgr->device)
+ tv_mgr->unset_device(tv_mgr);
+ tv_mgr->set_device(tv_mgr, dssdev);
+ mgr = tv_mgr;
+ }
}
}
if (mgr) {
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < MAX_DSS_OVERLAYS; i++) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
if (!ovl->manager || force) {
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index d0b3006ad8a5..26a3745628d7 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -380,9 +380,11 @@ void rfbi_transfer_area(u16 width, u16 height,
DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
- dispc_set_lcd_size(width, height);
+ dispc_set_lcd_size(OMAP_DSS_CHANNEL_LCD, width, height);
+ /* TODO: update for LCD2*/
- dispc_enable_lcd_out(1);
+ /* TODO: change here if LCD2 support is needed */
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 1);
rfbi.framedone_callback = callback;
rfbi.framedone_callback_data = data;
@@ -1255,11 +1257,14 @@ static int rfbi_display_enable(struct omap_dss_device *dssdev)
goto err1;
}
- dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+ /* TODO: change here for LCD2 support */
+ dispc_set_lcd_display_type(OMAP_DSS_CHANNEL_LCD,
+ OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_RFBI);
+ dispc_set_parallel_interface_mode(OMAP_DSS_CHANNEL_LCD,
+ OMAP_DSS_PARALLELMODE_RFBI);
- dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+ dispc_set_tft_data_lines(OMAP_DSS_CHANNEL_LCD, dssdev->ctrl.pixel_size);
rfbi_configure(dssdev->phy.rfbi.channel,
dssdev->ctrl.pixel_size,
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index c24f307d3da1..d5f5e1a7192b 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -34,10 +34,14 @@ static struct {
static void sdi_basic_init(void)
{
- dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
- dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_set_tft_data_lines(24);
+ dispc_set_parallel_interface_mode(OMAP_DSS_CHANNEL_LCD,
+ OMAP_DSS_PARALLELMODE_BYPASS);
+
+ /* TODO: change here for LCD2 support */
+ dispc_set_lcd_display_type(OMAP_DSS_CHANNEL_LCD,
+ OMAP_DSS_LCD_DISPLAY_TFT);
+ dispc_set_tft_data_lines(OMAP_DSS_CHANNEL_LCD, 24);
dispc_lcd_enable_signal_polarity(1);
}
@@ -72,8 +76,9 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
/* 15.5.9.1.2 */
dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF;
- dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
- dssdev->panel.acb);
+ /* TODO: update for LCD2 here */
+ dispc_set_pol_freq(OMAP_DSS_CHANNEL_LCD, dssdev->panel.config,
+ dssdev->panel.acbi, dssdev->panel.acb);
if (!sdi.skip_init) {
r = dss_calc_clock_div(1, t->pixel_clock * 1000,
@@ -100,8 +105,8 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
t->pixel_clock = pck;
}
-
- dispc_set_lcd_timings(t);
+ /* TODO: if needed, add LCD2 support here*/
+ dispc_set_lcd_timings(OMAP_DSS_CHANNEL_LCD, t);
r = dss_set_clock_div(&dss_cinfo);
if (r)
@@ -119,7 +124,8 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
mdelay(2);
}
- dispc_enable_lcd_out(1);
+ /* TODO: change here if LCD2 support is needed */
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 1);
if (dssdev->driver->enable) {
r = dssdev->driver->enable(dssdev);
@@ -133,7 +139,8 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
return 0;
err3:
- dispc_enable_lcd_out(0);
+ /* TODO: change here if LCD2 support is needed */
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 0);
err2:
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
err1:
@@ -156,7 +163,8 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
if (dssdev->driver->disable)
dssdev->driver->disable(dssdev);
- dispc_enable_lcd_out(0);
+ /* TODO: change here if LCD2 support is needed */
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 0);
dss_sdi_disable();
@@ -175,7 +183,8 @@ static int sdi_display_suspend(struct omap_dss_device *dssdev)
if (dssdev->driver->suspend)
dssdev->driver->suspend(dssdev);
- dispc_enable_lcd_out(0);
+ /* TODO: change here if LCD2 support is needed */
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 0);
dss_sdi_disable();
@@ -200,7 +209,8 @@ static int sdi_display_resume(struct omap_dss_device *dssdev)
goto err;
mdelay(2);
- dispc_enable_lcd_out(1);
+ /* TODO: change here if LCD2 support is needed */
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 1);
if (dssdev->driver->resume)
dssdev->driver->resume(dssdev);
@@ -220,10 +230,11 @@ static int sdi_display_set_update_mode(struct omap_dss_device *dssdev,
return -EINVAL;
if (mode == OMAP_DSS_UPDATE_DISABLED) {
- dispc_enable_lcd_out(0);
+ /* TODO: change here if LCD2 support is needed */
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 0);
sdi.update_enabled = 0;
} else {
- dispc_enable_lcd_out(1);
+ dispc_enable_lcd_out(OMAP_DSS_CHANNEL_LCD, 1);
sdi.update_enabled = 1;
}
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index bb694cc52a50..8cba59ff9715 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -2,6 +2,7 @@ menuconfig FB_OMAP2
tristate "OMAP2/3 frame buffer support (EXPERIMENTAL)"
depends on FB && OMAP2_DSS
+ default y
select OMAP2_VRAM
select OMAP2_VRFB
select FB_CFB_FILLRECT
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 4c4bafdfaa43..2f6d2802aa12 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -731,7 +731,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
break;
}
- if (!display->enable_te) {
+ if (!display || !display->enable_te) {
r = -ENODEV;
break;
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index ef299839858a..a04a6a6b5fc3 100644..100755
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -31,7 +31,15 @@
#include <plat/display.h>
#include <plat/vram.h>
+#ifndef CONFIG_ARCH_OMAP4
#include <plat/vrfb.h>
+#endif
+
+#define MYCONFIG_ARCH_OMAP4
+
+#ifdef MYCONFIG_ARCH_OMAP4 /* TODO: correct this!*/
+#include <mach/tiler.h>
+#endif
#include "omapfb.h"
@@ -43,13 +51,15 @@
static char *def_mode;
static char *def_vram;
static int def_vrfb;
+static int def_tiler;
static int def_rotate;
static int def_mirror;
+static int def_numfb;
#ifdef DEBUG
unsigned int omapfb_debug;
module_param_named(debug, omapfb_debug, bool, 0644);
-static unsigned int omapfb_test_pattern;
+static unsigned int omapfb_test_pattern; /* change this to 0 to test logo */
module_param_named(test, omapfb_test_pattern, bool, 0644);
#endif
@@ -102,7 +112,7 @@ static void fill_fb(struct fb_info *fbi)
if (!addr)
return;
- DBG("fill_fb %dx%d, line_len %d bytes\n", w, h, fbi->fix.line_length);
+ DBG("fill_fb %dx%d, line_len %d bytes, bpp = %d\n", w, h, fbi->fix.line_length, var->bits_per_pixel);
for (y = 0; y < h; y++) {
for (x = 0; x < w; x++) {
@@ -360,6 +370,7 @@ static int fb_mode_to_dss_mode(struct fb_var_screeninfo *var,
default:
return -EINVAL;
}
+ /* DBG("fb_mode_to_dss_mode bits_per_pixel = %d\n", var->bits_per_pixel);*/
for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
struct omapfb_colormode *m = &omapfb_colormodes[i];
@@ -438,10 +449,14 @@ static void shrink_width(unsigned long max_frame_size,
static int check_vrfb_fb_size(unsigned long region_size,
const struct fb_var_screeninfo *var)
{
+#ifndef CONFIG_ARCH_OMAP4
unsigned long min_phys_size = omap_vrfb_min_phys_size(var->xres_virtual,
var->yres_virtual, var->bits_per_pixel >> 3);
return min_phys_size > region_size ? -EINVAL : 0;
+#else
+ return -EINVAL;
+#endif
}
static int check_fb_size(const struct omapfb_info *ofbi,
@@ -452,6 +467,7 @@ static int check_fb_size(const struct omapfb_info *ofbi,
unsigned long line_size = var->xres_virtual * bytespp;
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+#ifndef CONFIG_ARCH_OMAP4
/* One needs to check for both VRFB and OMAPFB limitations. */
if (check_vrfb_fb_size(max_frame_size, var))
shrink_height(omap_vrfb_max_height(
@@ -464,6 +480,9 @@ static int check_fb_size(const struct omapfb_info *ofbi,
}
return 0;
+#else
+ return -EINVAL;
+#endif
}
DBG("max frame size %lu, line size %lu\n", max_frame_size, line_size);
@@ -543,7 +562,7 @@ static int setup_vrfb_rotation(struct fb_info *fbi)
if (vrfb->vaddr[0])
return 0;
-
+#ifndef CONFIG_ARCH_OMAP4
omap_vrfb_setup(&rg->vrfb, rg->paddr,
var->xres_virtual,
var->yres_virtual,
@@ -553,7 +572,9 @@ static int setup_vrfb_rotation(struct fb_info *fbi)
r = omap_vrfb_map_angle(vrfb, var->yres_virtual, 0);
if (r)
return r;
-
+#else
+ return -EINVAL;
+#endif
/* used by open/write in fbmem.c */
fbi->screen_base = ofbi->region.vrfb.vaddr[0];
@@ -583,6 +604,9 @@ int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
struct omapfb_colormode *mode = &omapfb_colormodes[i];
+ if (!mode)
+ return -ENOENT;
+
if (dssmode == mode->dssmode) {
assign_colormode_to_var(var, mode);
return 0;
@@ -603,6 +627,8 @@ void set_fb_fix(struct fb_info *fbi)
/* used by open/write in fbmem.c */
fbi->screen_base = (char __iomem *)omapfb_get_region_vaddr(ofbi);
+ DBG("changing rotation to %d\n", var->rotate);
+
/* used by mmap in fbmem.c */
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
switch (var->nonstd) {
@@ -618,13 +644,16 @@ void set_fb_fix(struct fb_info *fbi)
}
fix->smem_len = var->yres_virtual * fix->line_length;
- } else {
+ } else if (ofbi->rotation_type != OMAP_DSS_ROT_TILER) {
fix->line_length =
(var->xres_virtual * var->bits_per_pixel) >> 3;
- fix->smem_len = rg->size;
+
+ /* tiler line length is set during allocation, and cannot
+ be changed */
}
fix->smem_start = omapfb_get_region_paddr(ofbi);
+ fix->smem_len = rg->size;
fix->type = FB_TYPE_PACKED_PIXELS;
@@ -850,14 +879,21 @@ static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
DBG("setup_overlay %d, posx %d, posy %d, outw %d, outh %d\n", ofbi->id,
posx, posy, outw, outh);
+ if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ xres = var->xres;
+ yres = var->yres;
+ } else {
if (rotation == FB_ROTATE_CW || rotation == FB_ROTATE_CCW) {
xres = var->yres;
yres = var->xres;
} else {
xres = var->xres;
yres = var->yres;
+ }
}
+ offset = ((var->yoffset * var->xres_virtual +
+ var->xoffset) * var->bits_per_pixel) >> 3;
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
data_start_p = omapfb_get_region_rot_paddr(ofbi, rotation);
@@ -910,7 +946,14 @@ static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
info.paddr = data_start_p;
info.vaddr = data_start_v;
info.screen_width = screen_width;
- info.width = xres;
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ info.width = (rotation == FB_ROTATE_CW ||
+ rotation == FB_ROTATE_CCW) ? yres : xres;
+ } else {
+ info.width = xres;
+ }
+
info.height = yres;
info.color_mode = mode;
info.rotation_type = ofbi->rotation_type;
@@ -919,7 +962,14 @@ static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
info.pos_x = posx;
info.pos_y = posy;
- info.out_width = outw;
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ info.out_width = (rotation == FB_ROTATE_CW ||
+ rotation == FB_ROTATE_CCW) ? outh : outw;
+ } else {
+ info.out_width = outw;
+ }
+
info.out_height = outh;
r = ovl->set_overlay_info(ovl, &info);
@@ -966,6 +1016,10 @@ int omapfb_apply_changes(struct fb_info *fbi, int init)
if (init || (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
int rotation = (var->rotate + ofbi->rotation[i]) % 4;
+ if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ outw = var->xres;
+ outh = var->yres;
+ } else {
if (rotation == FB_ROTATE_CW ||
rotation == FB_ROTATE_CCW) {
outw = var->yres;
@@ -973,10 +1027,23 @@ int omapfb_apply_changes(struct fb_info *fbi, int init)
} else {
outw = var->xres;
outh = var->yres;
+ }
}
} else {
- outw = ovl->info.out_width;
- outh = ovl->info.out_height;
+ /* vid1 on fb lands here */
+ int rotation = (var->rotate + ofbi->rotation[i]) % 4;
+
+ DBG("vid pipeline: scaling is available, but keep the input "
+ "size instead of scaling to the output size\n");
+ if (rotation == FB_ROTATE_CW ||
+ rotation == FB_ROTATE_CCW) {
+ outw = var->yres;
+ outh = var->xres;
+ } else {
+ DBG("info.out_width = %d, info.out_height = %d; "
+ "revisit this for the vid pipeline\n",
+ ovl->info.out_width, ovl->info.out_height);
+ /* outw = ovl->info.out_width;
+ outh = ovl->info.out_height; */
+ outw = var->xres;
+ outh = var->yres;
+ }
}
if (init) {
@@ -994,6 +1061,7 @@ int omapfb_apply_changes(struct fb_info *fbi, int init)
if (!init && ovl->manager)
ovl->manager->apply(ovl->manager);
}
+ DBG("apply changes done from omapfb\n");
return 0;
err:
DBG("apply_changes failed\n");
@@ -1035,7 +1103,8 @@ static int omapfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
struct fb_var_screeninfo new_var;
- int r;
+ int r = 0;
+ struct omap_dss_device *display = fb2display(fbi);
DBG("pan_display(%d)\n", FB2OFB(fbi)->id);
@@ -1051,6 +1120,9 @@ static int omapfb_pan_display(struct fb_var_screeninfo *var,
r = omapfb_apply_changes(fbi, 0);
+ if (display && display->update)
+ display->update(display, 0, 0, var->xres, var->yres);
+
return r;
}
@@ -1098,7 +1170,24 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
DBG("user mmap region start %lx, len %d, off %lx\n", start, len, off);
- vma->vm_pgoff = off >> PAGE_SHIFT;
+ vma->vm_private_data = ofbi;
+ if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ int k = 0, p = fix->line_length;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_ops = &mmap_user_ops; /* &dmm_remap_vm_ops; */
+
+ /* map the buffer one line (line_length bytes) at a time; tiler rows
+ are not contiguous at that stride */
+ for (k = 0; k < len; k += p) {
+ /* map each page of the line */
+ vma->vm_pgoff = off >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, vma->vm_start + k,
+ off >> PAGE_SHIFT, p, vma->vm_page_prot))
+ return -EAGAIN;
+ off += 2*64*TILER_WIDTH;
+ }
+ } else {
+ vma->vm_pgoff = off >> PAGE_SHIFT;
vma->vm_flags |= VM_IO | VM_RESERVED;
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &mmap_user_ops;
@@ -1106,6 +1195,7 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
+ }
/* vm_ops.open won't be called for mmap itself. */
atomic_inc(&ofbi->map_count);
return 0;
@@ -1214,6 +1304,11 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
int do_update = 0;
int r = 0;
+ if (!display) {
+ printk(KERN_ERR "omapfb_blank: Unable to find display\n");
+ r = -EINVAL;
+ goto err0;
+ }
omapfb_lock(fbdev);
switch (blank) {
@@ -1258,7 +1353,7 @@ exit:
r = display->update(display, 0, 0, w, h);
}
-
+err0:
return r;
}
@@ -1299,19 +1394,25 @@ static void omapfb_free_fbmem(struct fb_info *fbi)
rg = &ofbi->region;
+ if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ tiler_free(rg->paddr);
+ } else {
if (rg->paddr)
if (omap_vram_free(rg->paddr, rg->size))
dev_err(fbdev->dev, "VRAM FREE failed\n");
+ }
if (rg->vaddr)
iounmap(rg->vaddr);
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+#ifndef CONFIG_ARCH_OMAP4
/* unmap the 0 angle rotation */
if (rg->vrfb.vaddr[0]) {
iounmap(rg->vrfb.vaddr[0]);
- omap_vrfb_release_ctx(&rg->vrfb);
+ //omap_vrfb_release_ctx(&rg->vrfb);
}
+#endif
}
rg->vaddr = NULL;
@@ -1350,6 +1451,10 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
struct omapfb2_mem_region *rg;
void __iomem *vaddr;
int r;
+ u16 h = 0, w = 0;
+ unsigned long pstride;
+ size_t psize;
+
rg = &ofbi->region;
memset(rg, 0, sizeof(*rg));
@@ -1358,7 +1463,20 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
if (!paddr) {
DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
- r = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, &paddr);
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ int err = 0xFFFFFFFF;
+ /* get width & height from line length & size */
+ w = fbi->fix.line_length /
+ (fbi->var.bits_per_pixel >> 3);
+ h = size / fbi->fix.line_length;
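+ /* A 32-bit TILER container is requested unconditionally here; this
+ * presumably matches the 32 bpp forced for TILER framebuffers in
+ * omapfb_alloc_fbmem_display() below. */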
+ err = tiler_alloc(TILFMT_32BIT, w, h, &paddr);
+ if (err != 0x0)
+ return -ENOMEM;
+ r = 0;
+ } else {
+ r = omap_vram_alloc(OMAPFB_MEMTYPE_SDRAM, size, &paddr);
+ }
} else {
DBG("reserving %lu bytes at %lx for fb %d\n", size, paddr,
ofbi->id);
@@ -1370,18 +1488,28 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
return -ENOMEM;
}
- if (ofbi->rotation_type != OMAP_DSS_ROT_VRFB) {
+ if (ofbi->rotation_type == OMAP_DSS_ROT_DMA) {
vaddr = ioremap_wc(paddr, size);
if (!vaddr) {
- dev_err(fbdev->dev, "failed to ioremap framebuffer\n");
+ dev_err(fbdev->dev,
+ "failed to ioremap framebuffer\n");
omap_vram_free(paddr, size);
return -ENOMEM;
}
+ } else if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ pstride = tiler_stride(tiler_get_natural_addr(paddr));
+ psize = h * pstride;
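+ /* Remap the TILER buffer using the container's natural stride as the
+ * source pitch and fix.line_length as the destination pitch, so the
+ * kernel virtual mapping appears linear (description of the apparent
+ * intent; exact semantics depend on __arm_multi_strided_ioremap). */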
+ vaddr = __arm_multi_strided_ioremap(1, &paddr, &psize,
+ &pstride, (unsigned long *) &fbi->fix.line_length,
+ MT_DEVICE_WC);
+ if (vaddr == NULL)
+ return -ENOMEM;
- DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr);
- } else {
- r = omap_vrfb_request_ctx(&rg->vrfb);
+ /* DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr); */
+ DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr);
+ } else if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+ r = 0; /* omap_vrfb_request_ctx(&rg->vrfb); not used on OMAP4 */
if (r) {
dev_err(fbdev->dev, "vrfb create ctx failed\n");
return r;
@@ -1389,7 +1517,6 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
vaddr = NULL;
}
-
rg->paddr = paddr;
rg->vaddr = vaddr;
rg->size = size;
@@ -1423,20 +1550,37 @@ static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
break;
}
+ if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ fbi->var.bits_per_pixel = 32; /* always 32-bit for tiler */
+ bytespp = fbi->var.bits_per_pixel >> 3;
+ }
+
if (!size) {
u16 w, h;
display->get_resolution(display, &w, &h);
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
- size = max(omap_vrfb_min_phys_size(w, h, bytespp),
- omap_vrfb_min_phys_size(h, w, bytespp));
+ //size = max(omap_vrfb_min_phys_size(w, h, bytespp),
+ // omap_vrfb_min_phys_size(h, w, bytespp));
DBG("adjusting fb mem size for VRFB, %u -> %lu\n",
w * h * bytespp, size);
- } else {
- size = w * h * bytespp;
+ int oldw = w, oldh = h;
+ /* Because we change the resolution of the 0 degree
+ * view, we need to alloc max(w, h) for height */
+ h = max(w, h);
+ w = 2048;
+ DBG("adjusting fb mem size for TILER, %dx%d -> %dx%d\n",
+ oldw, oldh, w, h);
+ } else if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ /* round up width to tiler size */
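+ /* i.e. pad each line so that line_length is a whole number of pages,
+ * which presumably lets every row start on a TILER page boundary. */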
+ w = ALIGN(w, PAGE_SIZE / bytespp);
+ fbi->fix.line_length = w * bytespp;
}
+
+ size = w * h * bytespp;
+
}
if (!size)
@@ -1590,9 +1734,11 @@ static int omapfb_allocate_all_fbs(struct omapfb2_device *fbdev)
}
for (i = 0; i < fbdev->num_fbs; i++) {
- /* allocate memory automatically only for fb0, or if
- * excplicitly defined with vram or plat data option */
- if (i == 0 || vram_sizes[i] != 0) {
+ /* allocate memory automatically only for fb0, fb1, fb2, or if
+ * explicitly defined with the vram or platform data option.
+ * fb1 and fb2 support added by Mayuresh.
+ */
+ if (i == 0 || i == 1 || i == 2 || vram_sizes[i] != 0) {
r = omapfb_alloc_fbmem_display(fbdev->fbs[i],
vram_sizes[i], vram_paddrs[i]);
@@ -1743,9 +1889,13 @@ static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
if (display) {
u16 w, h;
int rotation = (var->rotate + ofbi->rotation[0]) % 4;
-
+ printk(KERN_ERR "rotation = %d, var->rotate = %d ",rotation,var->rotate);
display->get_resolution(display, &w, &h);
+ if (ofbi->rotation_type == OMAP_DSS_ROT_TILER) {
+ var->xres = w;
+ var->yres = h;
+ } else {
if (rotation == FB_ROTATE_CW ||
rotation == FB_ROTATE_CCW) {
var->xres = h;
@@ -1753,6 +1903,7 @@ static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
} else {
var->xres = w;
var->yres = h;
+ }
}
var->xres_virtual = var->xres;
@@ -1838,14 +1989,21 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
{
- int r, i;
+ int r, i, fb_no;
fbdev->num_fbs = 0;
- DBG("create %d framebuffers\n", CONFIG_FB_OMAP2_NUM_FBS);
+ /* DBG("create %d framebuffers\n", CONFIG_FB_OMAP2_NUM_FBS); */
+
+ /* decide number of framebuffers to be created */
+ /* If value is specified in bootargs use it */
+ if (def_numfb)
+ fb_no = def_numfb;
+ else /* otherwise use the value from config */
+ fb_no = CONFIG_FB_OMAP2_NUM_FBS;
/* allocate fb_infos */
- for (i = 0; i < CONFIG_FB_OMAP2_NUM_FBS; i++) {
+ for (i = 0; i < fb_no; i++) {
struct fb_info *fbi;
struct omapfb_info *ofbi;
@@ -1867,8 +2025,14 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
ofbi->id = i;
/* assign these early, so that fb alloc can use them */
- ofbi->rotation_type = def_vrfb ? OMAP_DSS_ROT_VRFB :
- OMAP_DSS_ROT_DMA;
+ if (def_vrfb == 1) {
+ ofbi->rotation_type = OMAP_DSS_ROT_VRFB;
+ } else if (def_tiler == 1) {
+ ofbi->rotation_type = OMAP_DSS_ROT_TILER;
+ } else {
+ ofbi->rotation_type = OMAP_DSS_ROT_DMA;
+ }
+
ofbi->mirror = def_mirror;
fbdev->num_fbs++;
@@ -1881,6 +2045,7 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
ofbi->overlays[0] = fbdev->overlays[i];
+
ofbi->num_overlays = 1;
}
@@ -2040,6 +2205,11 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
int r = 0;
str = kmalloc(strlen(def_mode) + 1, GFP_KERNEL);
+ if (!str) {
+ dev_err(&fbdev->dev, "unable to allocate memory for a string\n");
+ r = -ENOMEM;
+ goto err0;
+ }
strcpy(str, def_mode);
options = str;
@@ -2078,7 +2248,7 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
}
kfree(str);
-
+err0:
return r;
}
@@ -2153,6 +2323,11 @@ static int omapfb_probe(struct platform_device *pdev)
/* gfx overlay should be the default one. find a display
* connected to that, and use it as default display */
ovl = omap_dss_get_overlay(0);
+ if (!ovl) {
+ dev_warn(&pdev->dev, "could not find overlay ");
+ goto err0;
+ }
+
if (ovl->manager && ovl->manager->device) {
def_display = ovl->manager->device;
} else {
@@ -2247,7 +2422,9 @@ module_param_named(mode, def_mode, charp, 0);
module_param_named(vram, def_vram, charp, 0);
module_param_named(rotate, def_rotate, int, 0);
module_param_named(vrfb, def_vrfb, bool, 0);
+module_param_named(tiler, def_tiler, bool, 0);
module_param_named(mirror, def_mirror, bool, 0);
+module_param_named(numfb, def_numfb, int, 0);
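+
+/* Illustrative boot arguments (parameter names taken from the list above):
+ *   omapfb.tiler=1 omapfb.numfb=3
+ * selects TILER rotation and creates three framebuffers. */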
/* late_initcall to let panel/ctrl drivers loaded first.
* I guess better option would be a more dynamic approach,
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index f7c9c739e5ef..3594a2a814f2 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -28,7 +28,7 @@
#endif
#include <plat/display.h>
-
+#include <plat/vrfb.h>
#ifdef DEBUG
extern unsigned int omapfb_debug;
#define DBG(format, ...) \
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c
index 55a4de5e5d10..d03846ee0341 100644
--- a/drivers/video/omap2/vram.c
+++ b/drivers/video/omap2/vram.c
@@ -548,6 +548,10 @@ void __init omap_vram_reserve_sdram(void)
}
#endif
+ /* changed for HDMI 1080p test */
+ size = 1920 * 1920 * 4;
+ paddr = 0;
+
if (!size)
return;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index eaf36364b7d4..d5ded454c900 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -203,6 +203,13 @@ struct mmc_host {
struct dentry *debugfs_root;
+#ifdef CONFIG_TIWLAN_SDIO
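+	/* CIS and function descriptors supplied by the platform for an
+	 * embedded (non-enumerable) SDIO device such as the TI WLAN chip;
+	 * appears to be consumed via mmc_set_embedded_sdio_data() below. */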
+ struct {
+ struct sdio_cis *cis;
+ struct sdio_embedded_func *funcs;
+ } embedded_sdio_data;
+#endif
+
unsigned long private[0] ____cacheline_aligned;
};
@@ -211,6 +218,12 @@ extern int mmc_add_host(struct mmc_host *);
extern void mmc_remove_host(struct mmc_host *);
extern void mmc_free_host(struct mmc_host *);
+#ifdef CONFIG_TIWLAN_SDIO
+extern void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_embedded_func *funcs);
+#endif
+
static inline void *mmc_priv(struct mmc_host *host)
{
return (void *)host->private;
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index ac3ab683fec6..4d77c9cc76e3 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -20,6 +20,16 @@ struct sdio_func;
typedef void (sdio_irq_handler_t)(struct sdio_func *);
+#ifdef CONFIG_TIWLAN_SDIO
+/*
+ * Structure used to hold embedded SDIO device data from platform layer
+ */
+struct sdio_embedded_func {
+ uint8_t f_class;
+ uint32_t f_maxblksize;
+};
+#endif
+
/*
* SDIO function CIS tuple (unknown to the core)
*/
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 33b2ea09a4ad..770f311fcdb9 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -35,6 +35,11 @@
#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104
#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105
+#ifdef CONFIG_TIWLAN_SDIO
+#define SDIO_VENDOR_ID_TI 0x104c
+#define SDIO_DEVICE_ID_TI_WL12xx 0x9066
+#endif
+
#define SDIO_VENDOR_ID_SIANO 0x039a
#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
#define SDIO_DEVICE_ID_SIANO_NICE 0x0202
diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h
index f46c40ac6d45..42e054b08b12 100644
--- a/include/linux/omapfb.h
+++ b/include/linux/omapfb.h
@@ -25,8 +25,9 @@
#define __LINUX_OMAPFB_H__
#include <linux/fb.h>
-#include <linux/ioctl.h>
-#include <linux/types.h>
+
+#include <asm/ioctl.h>
+#include <asm/types.h>
/* IOCTL commands. */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 8c3dd36fe91a..e02b945d74c9 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -182,6 +182,9 @@
/* Aeroflex Gaisler GRLIB APBUART */
#define PORT_APBUART 90
+/* TI OMAP-UART */
+#define PORT_OMAP 91
+
#ifdef __KERNEL__
#include <linux/compiler.h>
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index d4962a782b8a..151ea94aab8d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1396,6 +1396,18 @@ struct v4l2_rds_data {
#define V4L2_RDS_BLOCK_ERROR 0x80
/*
+ * Color conversion
+ * User needs to pass pointer to color conversion matrix
+ * defined by hardware
+ */
+struct v4l2_color_space_conversion {
+ __s32 coefficients[3][3];
+ __s32 const_factor;
+ __s32 input_offs[3];
+ __s32 output_offs[3];
+};
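+
+/* Presumably applied by the hardware as something like
+ *   out = (coefficients * (in + input_offs)) / const_factor + output_offs;
+ * the exact fixed-point semantics are defined by the display/ISP block. */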
+
+/*
* A U D I O
*/
struct v4l2_audio {
@@ -1741,6 +1753,9 @@ struct v4l2_dbg_chip_ident {
#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
+#define VIDIOC_S_COL_SPC_CONV _IOW('V', 89, struct v4l2_color_space_conversion)
+#define VIDIOC_G_COL_SPC_CONV _IOR('V', 90, struct v4l2_color_space_conversion)
+
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index e8ba0f2efbae..63c0fb8d27cb 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -257,6 +257,11 @@ struct v4l2_ioctl_ops {
/* For other private ioctls */
long (*vidioc_default) (struct file *file, void *fh,
int cmd, void *arg);
+
+ int (*vidioc_s_color_space_conv)(struct file *file, void *fh,
+ struct v4l2_color_space_conversion *a);
+ int (*vidioc_g_color_space_conv)(struct file *file, void *fh,
+ struct v4l2_color_space_conversion *a);
};