From f0efc831d9439589efaf6406695470eca93ba08d Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Tue, 16 Jan 2018 16:26:01 -0500 Subject: drm/msm/dsi: check for failure on retrieving pll in dsi manager Make msm_dsi_pll_init consistently return an error code instead of NULL when pll initialization fails so that later pll retrieval can check against an error code. Add checks for these failures after retrieval of src_pll to avoid invalid pointer dereferences later in msm_dsi_pll_get_clk_provider. Signed-off-by: Lloyd Atkinson Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi_manager.c | 4 ++++ drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 6 +++--- drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 855248132b2b..1a54fd67c9c4 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -88,6 +88,8 @@ static int dsi_mgr_setup_components(int id) msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); + if (IS_ERR(src_pll)) + return PTR_ERR(src_pll); ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); } else if (!other_dsi) { ret = 0; @@ -116,6 +118,8 @@ static int dsi_mgr_setup_components(int id) msm_dsi_phy_set_usecase(clk_slave_dsi->phy, MSM_DSI_PHY_SLAVE); src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy); + if (IS_ERR(src_pll)) + return PTR_ERR(src_pll); ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 790ca280cbfd..c8bfaa780651 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -503,10 +503,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) goto fail; phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id); - if (!phy->pll) + if (IS_ERR_OR_NULL(phy->pll)) dev_info(dev, - "%s: pll init failed, need separate pll clk driver\n", - __func__); + "%s: pll init failed: %ld, need separate pll clk driver\n", + __func__, PTR_ERR(phy->pll)); dsi_phy_disable_resource(phy); diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index bc289f5c9078..491f08dce969 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -173,7 +173,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, if (IS_ERR(pll)) { dev_err(dev, "%s: failed to init DSI PLL\n", __func__); - return NULL; + return pll; } pll->type = type; -- cgit v1.2.3 From 6e1787cf45e48866c01dadc2a1b6c3d63d75b8d1 Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Tue, 16 Jan 2018 16:26:02 -0500 Subject: drm/msm/dsi: correct DSI id bounds check during registration Check DSI instance id argument against the proper boundary size to protect against invalid configuration of the DSI id. 
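
To make the off-by-one concrete, here is a minimal userspace sketch (not driver code; it assumes DSI_MAX is 2, matching the two DSI controllers handled elsewhere in this driver). An array sized DSI_MAX only has valid indices 0 .. DSI_MAX - 1, so id == DSI_MAX must be rejected as well, which is why the check becomes ">=" rather than ">":

    #include <stdio.h>

    #define DSI_MAX 2

    static struct { int used; } dsi_instances[DSI_MAX];

    static int register_id(int id)
    {
            /* "id > DSI_MAX" would let id == DSI_MAX slip through and
             * index one past the end of dsi_instances[] */
            if (id >= DSI_MAX)
                    return -1;
            dsi_instances[id].used = 1;
            return 0;
    }

    int main(void)
    {
            printf("id=1 -> %d (accepted)\n", register_id(1));
            printf("id=2 -> %d (rejected)\n", register_id(2));
            return 0;
    }
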
Signed-off-by: Lloyd Atkinson Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi_manager.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 1a54fd67c9c4..4cb1cb68878b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -862,7 +862,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi) int id = msm_dsi->id; int ret; - if (id > DSI_MAX) { + if (id >= DSI_MAX) { pr_err("%s: invalid id %d\n", __func__, id); return -EINVAL; } -- cgit v1.2.3 From 3f0689e663524115b068258bab789dff1ddab5da Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Tue, 16 Jan 2018 16:26:03 -0500 Subject: drm/msm/dsi: check msm_dsi and dsi pointers before use Move null checks of pointer arguments to the beginning of the modeset init function since they are referenced immediately instead of after they have already been used. Signed-off-by: Lloyd Atkinson Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 98742d7af6dc..ee7e090e27b4 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -196,7 +196,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, struct drm_bridge *ext_bridge; int ret; - if (WARN_ON(!encoder)) + if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev)) return -EINVAL; msm_dsi->dev = dev; @@ -245,19 +245,17 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, return 0; fail: - if (msm_dsi) { - /* bridge/connector are normally destroyed by drm: */ - if (msm_dsi->bridge) { - msm_dsi_manager_bridge_destroy(msm_dsi->bridge); - msm_dsi->bridge = NULL; - } + /* bridge/connector are normally destroyed by drm: */ + if (msm_dsi->bridge) { + msm_dsi_manager_bridge_destroy(msm_dsi->bridge); + msm_dsi->bridge = NULL; + } - /* don't destroy connector if we didn't make it */ - if (msm_dsi->connector && !msm_dsi->external_bridge) - msm_dsi->connector->funcs->destroy(msm_dsi->connector); + /* don't destroy connector if we didn't make it */ + if (msm_dsi->connector && !msm_dsi->external_bridge) + msm_dsi->connector->funcs->destroy(msm_dsi->connector); - msm_dsi->connector = NULL; - } + msm_dsi->connector = NULL; return ret; } -- cgit v1.2.3 From 331dc0bc195bb77fcbe60b4513464b406a6d20cb Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 13 Dec 2017 15:12:56 -0500 Subject: drm/msm: add a5xx specific debugfs Add some debugfs to dump out PFP and ME microcontroller state, as well as some of the queues (MEQ and ROQ). Also add a debugfs file to trigger a GPU reset (and reloading the firmware on next submit). 
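
As a usage sketch only (assumptions: debugfs is mounted at /sys/kernel/debug, the adreno device is DRM minor 0, and the entry names follow the drm_info_list entries and the "reset" file added below; the reset write additionally requires CAP_SYS_ADMIN), a userspace program along these lines could read the new dumps and trigger a reset:

    #include <stdio.h>

    /* dump one debugfs file to stdout */
    static void dump(const char *path)
    {
            char line[256];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);
                    return;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
    }

    int main(void)
    {
            const char *files[] = { "pfp", "me", "meq", "roq" };
            char path[64];
            unsigned int i;
            FILE *f;

            for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
                    /* path assumes DRM minor 0; adjust for your setup */
                    snprintf(path, sizeof(path),
                             "/sys/kernel/debug/dri/0/%s", files[i]);
                    dump(path);
            }

            /* trigger a GPU reset; firmware is reloaded on the next submit */
            f = fopen("/sys/kernel/debug/dri/0/reset", "w");
            if (f) {
                    fputs("1\n", f);
                    fclose(f);
            }
            return 0;
    }
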
Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/Makefile | 2 + drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 188 +++++++++++++++++++++++++++++ drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 1 + drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 4 + drivers/gpu/drm/msm/adreno/adreno_device.c | 6 + drivers/gpu/drm/msm/msm_debugfs.c | 5 +- drivers/gpu/drm/msm/msm_gpu.h | 2 + 7 files changed, 207 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 92b3844202d2..ebe0c3d0b126 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -62,6 +62,8 @@ msm-y := \ msm_ringbuffer.o \ msm_submitqueue.o +msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o + msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c new file mode 100644 index 000000000000..cef09780ef17 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -0,0 +1,188 @@ +/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + +#include +#include +#include + +#include "a5xx_gpu.h" + +static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "PFP state:\n"); + + for (i = 0; i < 36; i++) { + gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i); + drm_printf(p, " %02x: %08x\n", i, + gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA)); + } + + return 0; +} + +static int me_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "ME state:\n"); + + for (i = 0; i < 29; i++) { + gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i); + drm_printf(p, " %02x: %08x\n", i, + gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA)); + } + + return 0; +} + +static int meq_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "MEQ state:\n"); + gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0); + + for (i = 0; i < 64; i++) { + drm_printf(p, " %02x: %08x\n", i, + gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA)); + } + + return 0; +} + +static int roq_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "ROQ state:\n"); + gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0); + + for (i = 0; i < 512 / 4; i++) { + uint32_t val[4]; + int j; + for (j = 0; j < 4; j++) + val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA); + drm_printf(p, " %02x: %08x %08x %08x %08x\n", i, + val[0], val[1], val[2], val[3]); + } + + return 0; +} + +static int show(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct msm_drm_private *priv = dev->dev_private; + struct drm_printer p = drm_seq_file_printer(m); + int (*show)(struct msm_gpu *gpu, struct drm_printer *p) = + node->info_ent->data; + + return show(priv->gpu, &p); +} + +#define ENT(n) { .name = #n, .show = show, .data = n ##_print } +static struct drm_info_list a5xx_debugfs_list[] = { 
+ ENT(pfp), + ENT(me), + ENT(meq), + ENT(roq), +}; + +/* for debugfs files that can be written to, we can't use drm helper: */ +static int +reset_set(void *data, u64 val) +{ + struct drm_device *dev = data; + struct msm_drm_private *priv = dev->dev_private; + struct msm_gpu *gpu = priv->gpu; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + if (!capable(CAP_SYS_ADMIN)) + return -EINVAL; + + /* TODO do we care about trying to make sure the GPU is idle? + * Since this is just a debug feature limited to CAP_SYS_ADMIN, + * maybe it is fine to let the user keep both pieces if they + * try to reset an active GPU. + */ + + mutex_lock(&dev->struct_mutex); + + if (adreno_gpu->pm4) { + release_firmware(adreno_gpu->pm4); + adreno_gpu->pm4 = NULL; + } + + if (adreno_gpu->pfp) { + release_firmware(adreno_gpu->pfp); + adreno_gpu->pfp = NULL; + } + if (a5xx_gpu->pm4_bo) { + if (a5xx_gpu->pm4_iova) + msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); + drm_gem_object_unreference(a5xx_gpu->pm4_bo); + a5xx_gpu->pm4_bo = NULL; + } + + if (a5xx_gpu->pfp_bo) { + if (a5xx_gpu->pfp_iova) + msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace); + drm_gem_object_unreference(a5xx_gpu->pfp_bo); + a5xx_gpu->pfp_bo = NULL; + } + + gpu->needs_hw_init = true; + + pm_runtime_get_sync(&gpu->pdev->dev); + gpu->funcs->recover(gpu); + + pm_runtime_put_sync(&gpu->pdev->dev); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n"); + + +int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + struct dentry *ent; + int ret; + + if (!minor) + return 0; + + ret = drm_debugfs_create_files(a5xx_debugfs_list, + ARRAY_SIZE(a5xx_debugfs_list), + minor->debugfs_root, minor); + + if (ret) { + dev_err(dev->dev, "could not install a5xx_debugfs_list\n"); + return ret; + } + + ent = debugfs_create_file("reset", S_IWUGO, + minor->debugfs_root, + dev, &reset_fops); + if (!ent) + return -ENOMEM; + + return 0; +} diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 7e09d44e4a15..579c28c8c994 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1195,6 +1195,7 @@ static const struct adreno_gpu_funcs funcs = { .destroy = a5xx_destroy, #ifdef CONFIG_DEBUG_FS .show = a5xx_show, + .debugfs_init = a5xx_debugfs_init, #endif .gpu_busy = a5xx_gpu_busy, }, diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 6fb8c2f9b9e4..7d71860c4bee 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -49,6 +49,10 @@ struct a5xx_gpu { #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) +#ifdef CONFIG_DEBUG_FS +int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor); +#endif + /* * In order to do lockless preemption we use a simple state machine to progress * through the process. 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 62bdb7316da1..6263cb906b3c 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -150,6 +150,12 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) return NULL; } + if (gpu->funcs->debugfs_init) { + gpu->funcs->debugfs_init(gpu, dev->primary); + gpu->funcs->debugfs_init(gpu, dev->render); + gpu->funcs->debugfs_init(gpu, dev->control); + } + return gpu; } diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 1855182c76ce..ba74cb4f94df 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -161,8 +161,11 @@ int msm_debugfs_init(struct drm_minor *minor) return ret; } - if (priv->kms->funcs->debugfs_init) + if (priv->kms->funcs->debugfs_init) { ret = priv->kms->funcs->debugfs_init(priv->kms, minor); + if (ret) + return ret; + } return ret; } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index fccfccd303af..b8241179175a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -65,6 +65,8 @@ struct msm_gpu_funcs { #ifdef CONFIG_DEBUG_FS /* show GPU status in debugfs: */ void (*show)(struct msm_gpu *gpu, struct seq_file *m); + /* for generation specific debugfs: */ + int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor); #endif int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value); }; -- cgit v1.2.3 From 6a8bd08d0465b2b8d214007c58598e2c15312296 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 13 Dec 2017 15:12:57 -0500 Subject: drm/msm: add sudo flag to submit ioctl This flags cause cmdstream to be executed from the ringbuffer (RB) instead of IB1. Normally not something you'd ever want to do, but it is super useful for firmware debugging. Hidden behind CAP_SYS_RAWIO and a default=n kconfig option which depends on EXPERT (and has a suitably scary warning), to prevent it from being used on accident. Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/Kconfig | 13 +++++++ drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 65 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/msm/msm_gem.h | 1 + drivers/gpu/drm/msm/msm_gem_submit.c | 9 +++++ include/uapi/drm/msm_drm.h | 2 ++ 5 files changed, 90 insertions(+) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 99d39b2aefa6..3065cb290aa8 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -28,6 +28,19 @@ config DRM_MSM_REGISTER_LOGGING that can be parsed by envytools demsm tool. If enabled, register logging can be switched on via msm.reglog=y module param. +config DRM_MSM_GPU_SUDO + bool "Enable SUDO flag on submits" + depends on DRM_MSM && EXPERT + default n + help + Enable userspace that has CAP_SYS_RAWIO to submit GPU commands + that are run from RB instead of IB1. This essentially gives + userspace kernel level access, but is useful for firmware + debugging. + + Only use this if you are a driver developer. This should *not* + be enabled for production kernels. If unsure, say N. 
+ config DRM_MSM_HDMI_HDCP bool "Enable HDMI HDCP support in MSM DRM driver" depends on DRM_MSM && QCOM_SCM diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 579c28c8c994..fa08b4897a56 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -140,6 +140,65 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); } +static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit, + struct msm_file_private *ctx) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = submit->ring; + struct msm_gem_object *obj; + uint32_t *ptr, dwords; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + if (priv->lastctx == ctx) + break; + case MSM_SUBMIT_CMD_BUF: + /* copy commands into RB: */ + obj = submit->bos[submit->cmd[i].idx].obj; + dwords = submit->cmd[i].size; + + ptr = msm_gem_get_vaddr(&obj->base); + + /* _get_vaddr() shouldn't fail at this point, + * since we've already mapped it once in + * submit_reloc() + */ + if (WARN_ON(!ptr)) + return; + + for (i = 0; i < dwords; i++) { + /* normally the OUT_PKTn() would wait + * for space for the packet. But since + * we just OUT_RING() the whole thing, + * need to call adreno_wait_ring() + * ourself: + */ + adreno_wait_ring(ring, 1); + OUT_RING(ring, ptr[i]); + } + + msm_gem_put_vaddr(&obj->base); + + break; + } + } + + a5xx_flush(gpu, ring); + a5xx_preempt_trigger(gpu); + + /* we might not necessarily have a cmd from userspace to + * trigger an event to know that submit has completed, so + * do this manually: + */ + a5xx_idle(gpu, ring); + ring->memptrs->fence = submit->seqno; + msm_gpu_retire(gpu); +} + static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) { @@ -149,6 +208,12 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0; + if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { + priv->lastctx = NULL; + a5xx_submit_in_rb(gpu, submit, ctx); + return; + } + OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); OUT_RING(ring, 0x02); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 9320e184b48d..c5d9bd3e47a8 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -146,6 +146,7 @@ struct msm_gem_submit { struct msm_gpu_submitqueue *queue; struct pid *pid; /* submitting process */ bool valid; /* true if no cmdstream patching needed */ + bool in_rb; /* "sudo" mode, copy cmds into RB */ struct msm_ringbuffer *ring; unsigned int nr_cmds; unsigned int nr_bos; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b8dc8f96caf2..7bd83e0afa97 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -430,6 +430,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS) return -EINVAL; + if (args->flags & MSM_SUBMIT_SUDO) { + if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) || + !capable(CAP_SYS_RAWIO)) + return -EINVAL; + } + queue = msm_submitqueue_get(ctx, args->queueid); if (!queue) return -ENOENT; @@ -471,6 +477,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out_unlock; } + if 
(args->flags & MSM_SUBMIT_SUDO) + submit->in_rb = true; + ret = submit_lookup_objects(submit, args, file); if (ret) goto out; diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index bbbaffad772d..c06d0a5bdd80 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -201,10 +201,12 @@ struct drm_msm_gem_submit_bo { #define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */ #define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */ #define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */ +#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */ #define MSM_SUBMIT_FLAGS ( \ MSM_SUBMIT_NO_IMPLICIT | \ MSM_SUBMIT_FENCE_FD_IN | \ MSM_SUBMIT_FENCE_FD_OUT | \ + MSM_SUBMIT_SUDO | \ 0) /* Each cmdstream submit consists of a table of buffers involved, and -- cgit v1.2.3 From 6d5796af7136046835621ffe680eb15ce88500b6 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 11:35:24 +0530 Subject: drm/msm/dsi: Update generated headers for 10nm PLL/PHY Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.xml.h | 187 +++++++++++++++++++++++++++++++++++--- 1 file changed, 174 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 479086ccf180..f6a9471b70c8 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27) - -Copyright (C) 2013-2017 by the following authors: +- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-01-12 09:09:22) +- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) + +Copyright (C) 2013-2018 by the following authors: - Rob Clark (robclark) - Ilia Mirkin (imirkin) @@ -1556,5 +1547,175 @@ static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00 #define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108 +#define REG_DSI_10nm_PHY_CMN_REVISION_ID0 0x00000000 + +#define REG_DSI_10nm_PHY_CMN_REVISION_ID1 0x00000004 + +#define REG_DSI_10nm_PHY_CMN_REVISION_ID2 0x00000008 + +#define REG_DSI_10nm_PHY_CMN_REVISION_ID3 0x0000000c + +#define REG_DSI_10nm_PHY_CMN_CLK_CFG0 
0x00000010 + +#define REG_DSI_10nm_PHY_CMN_CLK_CFG1 0x00000014 + +#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL 0x00000018 + +#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL 0x0000001c + +#define REG_DSI_10nm_PHY_CMN_VREG_CTRL 0x00000020 + +#define REG_DSI_10nm_PHY_CMN_CTRL_0 0x00000024 + +#define REG_DSI_10nm_PHY_CMN_CTRL_1 0x00000028 + +#define REG_DSI_10nm_PHY_CMN_CTRL_2 0x0000002c + +#define REG_DSI_10nm_PHY_CMN_LANE_CFG0 0x00000030 + +#define REG_DSI_10nm_PHY_CMN_LANE_CFG1 0x00000034 + +#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL 0x00000038 + +#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0 0x00000098 + +#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1 0x0000009c + +#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2 0x000000a0 + +#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3 0x000000a4 + +#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4 0x000000a8 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0 0x000000ac + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1 0x000000b0 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2 0x000000b4 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3 0x000000b8 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4 0x000000bc + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5 0x000000c0 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6 0x000000c4 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7 0x000000c8 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8 0x000000cc + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9 0x000000d0 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10 0x000000d4 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11 0x000000d8 + +#define REG_DSI_10nm_PHY_CMN_PHY_STATUS 0x000000ec + +#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0 0x000000f4 + +#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1 0x000000f8 + +static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; } + +#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000 + +#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004 + +#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010 + +#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER 0x0000001c + +#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000020 + +#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES 0x00000024 + +#define REG_DSI_10nm_PHY_PLL_CMODE 0x0000002c + +#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000030 + +#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000054 + +#define 
REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000064 + +#define REG_DSI_10nm_PHY_PLL_PFILT 0x0000007c + +#define REG_DSI_10nm_PHY_PLL_IFILT 0x00000080 + +#define REG_DSI_10nm_PHY_PLL_OUTDIV 0x00000094 + +#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE 0x000000a4 + +#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000a8 + +#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000b4 + +#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000cc + +#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000d0 + +#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000d4 + +#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000d8 + +#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x0000010c + +#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000110 + +#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000114 + +#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x00000118 + +#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1 0x0000011c + +#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1 0x00000120 + +#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL 0x0000013c + +#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000140 + +#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000144 + +#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x0000014c + +#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1 0x00000154 + +#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x0000015c + +#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000164 + +#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000180 + +#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY 0x00000184 + +#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS 0x0000018c + +#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0 + #endif /* DSI_XML */ -- cgit v1.2.3 From 973e02db35c2c4036693e32ed6f250eefd8c322c Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 11:35:25 +0530 Subject: drm/msm/dsi: Add skeleton 10nm PHY/PLL code Add new 10nm DSI PLL/PHY files that will be used on SDM845. Just populate empty pll/phy funcs for now. These will be filled up later. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/Kconfig | 7 ++ drivers/gpu/drm/msm/Makefile | 2 + drivers/gpu/drm/msm/dsi/dsi.h | 1 + drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 4 + drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 1 + drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 52 +++++++++ drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 3 + drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | 9 ++ drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | 176 +++++++++++++++++++++++++++++ 9 files changed, 255 insertions(+) create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c create mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 3065cb290aa8..38cbde971b48 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -94,3 +94,10 @@ config DRM_MSM_DSI_14NM_PHY default y help Choose this option if DSI PHY on 8996 is used on the platform. + +config DRM_MSM_DSI_10NM_PHY + bool "Enable DSI 10nm PHY driver in MSM DRM (used by SDM845)" + depends on DRM_MSM_DSI + default y + help + Choose this option if DSI PHY on SDM845 is used on the platform. 
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index ebe0c3d0b126..f74d449476f4 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -83,12 +83,14 @@ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o +msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) msm-y += dsi/pll/dsi_pll.o msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o +msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o endif obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 2302046197a8..70d9a9a47acd 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -36,6 +36,7 @@ enum msm_dsi_phy_type { MSM_DSI_PHY_20NM, MSM_DSI_PHY_28NM_8960, MSM_DSI_PHY_14NM, + MSM_DSI_PHY_10NM, MSM_DSI_PHY_MAX }; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index c8bfaa780651..8e9d5c255820 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -394,6 +394,10 @@ static const struct of_device_id dsi_phy_dt_match[] = { #ifdef CONFIG_DRM_MSM_DSI_14NM_PHY { .compatible = "qcom,dsi-phy-14nm", .data = &dsi_phy_14nm_cfgs }, +#endif +#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY + { .compatible = "qcom,dsi-phy-10nm", + .data = &dsi_phy_10nm_cfgs }, #endif {} }; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index 1733f6608a09..c56268cbdb3d 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -48,6 +48,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs; struct msm_dsi_dphy_timing { u32 clk_pre; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c new file mode 100644 index 000000000000..b7545fb63bf5 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2018, The Linux Foundation + */ + +#include + +#include "dsi_phy.h" +#include "dsi.xml.h" + +static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, + struct msm_dsi_phy_clk_request *clk_req) +{ + return 0; +} + +static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy) +{ +} + +static int dsi_10nm_phy_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + + phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", + "DSI_PHY_LANE"); + if (IS_ERR(phy->lane_base)) { + dev_err(&pdev->dev, "%s: failed to map phy lane base\n", + __func__); + return -ENOMEM; + } + + return 0; +} + +const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = { + .type = MSM_DSI_PHY_10NM, + .src_pll_truthtable = { {false, false}, {true, false} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vdds", 36000, 32}, + }, + }, + .ops = { + .enable = dsi_10nm_phy_enable, + .disable = dsi_10nm_phy_disable, + .init = dsi_10nm_phy_init, + }, + .io_start = { 0xae94400, 
0xae96400 }, + .num_dsi_phy = 2, +}; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index 491f08dce969..613e206fa4fc 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -166,6 +166,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, case MSM_DSI_PHY_14NM: pll = msm_dsi_pll_14nm_init(pdev, id); break; + case MSM_DSI_PHY_10NM: + pll = msm_dsi_pll_10nm_init(pdev, id); + break; default: pll = ERR_PTR(-ENXIO); break; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index f63e7ada74a8..8b32271cbc24 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -115,5 +115,14 @@ msm_dsi_pll_14nm_init(struct platform_device *pdev, int id) return ERR_PTR(-ENODEV); } #endif +#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY +struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id); +#else +static inline struct msm_dsi_pll * +msm_dsi_pll_10nm_init(struct platform_device *pdev, int id) +{ + return ERR_PTR(-ENODEV); +} +#endif #endif /* __DSI_PLL_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c new file mode 100644 index 000000000000..34c24442d34b --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -0,0 +1,176 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2018, The Linux Foundation + */ + +#include +#include +#include + +#include "dsi_pll.h" +#include "dsi.xml.h" + +struct dsi_pll_10nm { + struct msm_dsi_pll base; + + int id; + struct platform_device *pdev; + + void __iomem *phy_cmn_mmio; + void __iomem *mmio; + + int vco_delay; + + enum msm_dsi_phy_usecase uc; + struct dsi_pll_10nm *slave; +}; + +#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, base) + +/* + * Global list of private DSI PLL struct pointers. 
We need this for Dual DSI + * mode, where the master PLL's clk_ops needs access the slave's private data + */ +static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX]; + +static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + + DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate, + parent_rate); + + return 0; +} + +static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + u64 vco_rate = 0x0; + + DBG("DSI PLL%d returning vco rate = %lu", pll_10nm->id, + (unsigned long)vco_rate); + + return (unsigned long)vco_rate; +} + +static const struct clk_ops clk_ops_dsi_pll_10nm_vco = { + .round_rate = msm_dsi_pll_helper_clk_round_rate, + .set_rate = dsi_pll_10nm_vco_set_rate, + .recalc_rate = dsi_pll_10nm_vco_recalc_rate, + .prepare = msm_dsi_pll_helper_clk_prepare, + .unprepare = msm_dsi_pll_helper_clk_unprepare, +}; + +/* + * PLL Callbacks + */ + +static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + + DBG("DSI PLL%d", pll_10nm->id); +} + +static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + + DBG("DSI PLL%d", pll_10nm->id); + + return 0; +} + +static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + + DBG("DSI PLL%d", pll_10nm->id); + + return 0; +} + +static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, + struct clk **pixel_clk_provider) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + + DBG("DSI PLL%d", pll_10nm->id); + + if (byte_clk_provider) + *byte_clk_provider = NULL; + if (pixel_clk_provider) + *pixel_clk_provider = NULL; + + return 0; +} + +static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + + DBG("DSI PLL%d", pll_10nm->id); +} + +static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm) +{ + return 0; +} + +struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id) +{ + struct dsi_pll_10nm *pll_10nm; + struct msm_dsi_pll *pll; + int ret; + + if (!pdev) + return ERR_PTR(-ENODEV); + + pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL); + if (!pll_10nm) + return ERR_PTR(-ENOMEM); + + DBG("DSI PLL%d", id); + + pll_10nm->pdev = pdev; + pll_10nm->id = id; + pll_10nm_list[id] = pll_10nm; + + pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); + if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) { + dev_err(&pdev->dev, "failed to map CMN PHY base\n"); + return ERR_PTR(-ENOMEM); + } + + pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); + if (IS_ERR_OR_NULL(pll_10nm->mmio)) { + dev_err(&pdev->dev, "failed to map PLL base\n"); + return ERR_PTR(-ENOMEM); + } + + pll = &pll_10nm->base; + pll->min_rate = 1000000000UL; + pll->max_rate = 3500000000UL; + pll->get_provider = dsi_pll_10nm_get_provider; + pll->destroy = dsi_pll_10nm_destroy; + pll->save_state = dsi_pll_10nm_save_state; + pll->restore_state = dsi_pll_10nm_restore_state; + pll->set_usecase = dsi_pll_10nm_set_usecase; + + pll_10nm->vco_delay = 1; + + ret = pll_10nm_register(pll_10nm); + if (ret) { + dev_err(&pdev->dev, "failed to register PLL: %d\n", ret); + return 
ERR_PTR(ret); + } + + return pll; +} -- cgit v1.2.3 From 28e4309ab9c2bade2a93bd3b4c583be5ec440b84 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 11:35:26 +0530 Subject: drm/msm/dsi: Populate PLL 10nm clock ops Populate PLL clock ops from downstream. This contains the VCO PLL ops and the registration of standard clk_divider and clk_mux clocks. Unlike 14nm PLL, the postdividers/mux of the slave PLL doesn't need to be set to the same values of the postdivs/mux of the master PLL. Hence, we don't need special postdivider clock ops like we did with the 14nm PLL driver. Like the previous PLL drivers, the implementation is slightly different from downstream. We don't use shadow clocks, but have the ability to reparent the RCGs to a different source. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | 662 ++++++++++++++++++++++++++++- 1 file changed, 654 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index 34c24442d34b..c4c37a7df637 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -10,6 +10,78 @@ #include "dsi_pll.h" #include "dsi.xml.h" +/* + * DSI PLL 10nm - clock diagram (eg: DSI0): + * + * dsi0_pll_out_div_clk dsi0_pll_bit_clk + * | | + * | | + * +---------+ | +----------+ | +----+ + * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte + * +---------+ | +----------+ | +----+ + * | | + * | | dsi0_pll_by_2_bit_clk + * | | | + * | | +----+ | |\ dsi0_pclk_mux + * | |--| /2 |--o--| \ | + * | | +----+ | \ | +---------+ + * | --------------| |--o--| div_7_4 |-- dsi0pll + * |------------------------------| / +---------+ + * | +-----+ | / + * -----------| /4? 
|--o----------|/ + * +-----+ | | + * | |dsiclk_sel + * | + * dsi0_pll_post_out_div_clk + */ + +#define DSI_BYTE_PLL_CLK 0 +#define DSI_PIXEL_PLL_CLK 1 +#define NUM_PROVIDED_CLKS 2 + +struct dsi_pll_regs { + u32 pll_prop_gain_rate; + u32 pll_lockdet_rate; + u32 decimal_div_start; + u32 frac_div_start_low; + u32 frac_div_start_mid; + u32 frac_div_start_high; + u32 pll_clock_inverters; + u32 ssc_stepsize_low; + u32 ssc_stepsize_high; + u32 ssc_div_per_low; + u32 ssc_div_per_high; + u32 ssc_adjper_low; + u32 ssc_adjper_high; + u32 ssc_control; +}; + +struct dsi_pll_config { + u32 ref_freq; + bool div_override; + u32 output_div; + bool ignore_frac; + bool disable_prescaler; + bool enable_ssc; + bool ssc_center; + u32 dec_bits; + u32 frac_bits; + u32 lock_timer; + u32 ssc_freq; + u32 ssc_offset; + u32 ssc_adj_per; + u32 thresh_cycles; + u32 refclk_cycles; +}; + +struct pll_10nm_cached_state { + unsigned long vco_rate; + u8 bit_clk_div; + u8 pix_clk_div; + u8 pll_out_div; + u8 pll_mux; +}; + struct dsi_pll_10nm { struct msm_dsi_pll base; @@ -19,7 +91,24 @@ struct dsi_pll_10nm { void __iomem *phy_cmn_mmio; void __iomem *mmio; + u64 vco_ref_clk_rate; + u64 vco_current_rate; + + /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */ + spinlock_t postdiv_lock; + int vco_delay; + struct dsi_pll_config pll_configuration; + struct dsi_pll_regs reg_setup; + + /* private clocks: */ + struct clk_hw *hws[NUM_DSI_CLOCKS_MAX]; + u32 num_hws; + + /* clock-provider: */ + struct clk_hw_onecell_data *hw_data; + + struct pll_10nm_cached_state cached_state; enum msm_dsi_phy_usecase uc; struct dsi_pll_10nm *slave; @@ -33,6 +122,190 @@ struct dsi_pll_10nm { */ static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX]; +static void dsi_pll_setup_config(struct dsi_pll_10nm *pll) +{ + struct dsi_pll_config *config = &pll->pll_configuration; + + config->ref_freq = pll->vco_ref_clk_rate; + config->output_div = 1; + config->dec_bits = 8; + config->frac_bits = 18; + config->lock_timer = 64; + config->ssc_freq = 31500; + config->ssc_offset = 5000; + config->ssc_adj_per = 2; + config->thresh_cycles = 32; + config->refclk_cycles = 256; + + config->div_override = false; + config->ignore_frac = false; + config->disable_prescaler = false; + + config->enable_ssc = false; + config->ssc_center = 0; +} + +static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll) +{ + struct dsi_pll_config *config = &pll->pll_configuration; + struct dsi_pll_regs *regs = &pll->reg_setup; + u64 fref = pll->vco_ref_clk_rate; + u64 pll_freq; + u64 divider; + u64 dec, dec_multiple; + u32 frac; + u64 multiplier; + + pll_freq = pll->vco_current_rate; + + if (config->disable_prescaler) + divider = fref; + else + divider = fref * 2; + + multiplier = 1 << config->frac_bits; + dec_multiple = div_u64(pll_freq * multiplier, divider); + div_u64_rem(dec_multiple, multiplier, &frac); + + dec = div_u64(dec_multiple, multiplier); + + if (pll_freq <= 1900000000UL) + regs->pll_prop_gain_rate = 8; + else if (pll_freq <= 3000000000UL) + regs->pll_prop_gain_rate = 10; + else + regs->pll_prop_gain_rate = 12; + if (pll_freq < 1100000000UL) + regs->pll_clock_inverters = 8; + else + regs->pll_clock_inverters = 0; + + regs->pll_lockdet_rate = config->lock_timer; + regs->decimal_div_start = dec; + regs->frac_div_start_low = (frac & 0xff); + regs->frac_div_start_mid = (frac & 0xff00) >> 8; + regs->frac_div_start_high = (frac & 0x30000) >> 16; +} + +#define SSC_CENTER BIT(0) +#define SSC_EN BIT(1) + +static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll) +{ + struct dsi_pll_config 
*config = &pll->pll_configuration; + struct dsi_pll_regs *regs = &pll->reg_setup; + u32 ssc_per; + u32 ssc_mod; + u64 ssc_step_size; + u64 frac; + + if (!config->enable_ssc) { + DBG("SSC not enabled\n"); + return; + } + + ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1; + ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1); + ssc_per -= ssc_mod; + + frac = regs->frac_div_start_low | + (regs->frac_div_start_mid << 8) | + (regs->frac_div_start_high << 16); + ssc_step_size = regs->decimal_div_start; + ssc_step_size *= (1 << config->frac_bits); + ssc_step_size += frac; + ssc_step_size *= config->ssc_offset; + ssc_step_size *= (config->ssc_adj_per + 1); + ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1)); + ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000); + + regs->ssc_div_per_low = ssc_per & 0xFF; + regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8; + regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF); + regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8); + regs->ssc_adjper_low = config->ssc_adj_per & 0xFF; + regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8; + + regs->ssc_control = config->ssc_center ? SSC_CENTER : 0; + + pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n", + regs->decimal_div_start, frac, config->frac_bits); + pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n", + ssc_per, (u32)ssc_step_size, config->ssc_adj_per); +} + +static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll) +{ + void __iomem *base = pll->mmio; + struct dsi_pll_regs *regs = &pll->reg_setup; + + if (pll->pll_configuration.enable_ssc) { + pr_debug("SSC is enabled\n"); + + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1, + regs->ssc_stepsize_low); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1, + regs->ssc_stepsize_high); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1, + regs->ssc_div_per_low); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1, + regs->ssc_div_per_high); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1, + regs->ssc_adjper_low); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1, + regs->ssc_adjper_high); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL, + SSC_EN | regs->ssc_control); + } +} + +static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll) +{ + void __iomem *base = pll->mmio; + + pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80); + pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03); + pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00); + pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00); + pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e); + pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40); + pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, + 0xba); + pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c); + pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00); + pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, + 0x4c); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80); + pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29); + pll_write(base + 
REG_DSI_10nm_PHY_PLL_IFILT, 0x3f); +} + +static void dsi_pll_commit(struct dsi_pll_10nm *pll) +{ + void __iomem *base = pll->mmio; + struct dsi_pll_regs *reg = &pll->reg_setup; + + pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12); + pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1, + reg->decimal_div_start); + pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1, + reg->frac_div_start_low); + pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1, + reg->frac_div_start_mid); + pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1, + reg->frac_div_start_high); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); + pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10); + pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS, + reg->pll_clock_inverters); +} + static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { @@ -42,18 +315,192 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate, parent_rate); + pll_10nm->vco_current_rate = rate; + pll_10nm->vco_ref_clk_rate = parent_rate; + + dsi_pll_setup_config(pll_10nm); + + dsi_pll_calc_dec_frac(pll_10nm); + + dsi_pll_calc_ssc(pll_10nm); + + dsi_pll_commit(pll_10nm); + + dsi_pll_config_hzindep_reg(pll_10nm); + + dsi_pll_ssc_commit(pll_10nm); + + /* flush, ensure all register writes are done*/ + wmb(); + return 0; } +static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll) +{ + int rc; + u32 status = 0; + u32 const delay_us = 100; + u32 const timeout_us = 5000; + + rc = readl_poll_timeout_atomic(pll->mmio + + REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE, + status, + ((status & BIT(0)) > 0), + delay_us, + timeout_us); + if (rc) + pr_err("DSI PLL(%d) lock failed, status=0x%08x\n", + pll->id, status); + + return rc; +} + +static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll) +{ + u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0); + + pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0); + pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0, + data & ~BIT(5)); + ndelay(250); +} + +static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll) +{ + u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0); + + pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0, + data | BIT(5)); + pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0); + ndelay(250); +} + +static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll) +{ + u32 data; + + data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1, + data & ~BIT(5)); +} + +static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll) +{ + u32 data; + + data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1, + data | BIT(5)); +} + +static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + int rc; + + dsi_pll_enable_pll_bias(pll_10nm); + if (pll_10nm->slave) + dsi_pll_enable_pll_bias(pll_10nm->slave); + + /* Start PLL */ + pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, + 0x01); + + /* + * ensure all PLL configurations are written prior to checking + * for PLL lock. 
+ */ + wmb(); + + /* Check for PLL lock */ + rc = dsi_pll_10nm_lock_status(pll_10nm); + if (rc) { + pr_err("PLL(%d) lock failed\n", pll_10nm->id); + goto error; + } + + pll->pll_on = true; + + dsi_pll_enable_global_clk(pll_10nm); + if (pll_10nm->slave) + dsi_pll_enable_global_clk(pll_10nm->slave); + + pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, + 0x01); + if (pll_10nm->slave) + pll_write(pll_10nm->slave->phy_cmn_mmio + + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01); + +error: + return rc; +} + +static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll) +{ + pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0); + dsi_pll_disable_pll_bias(pll); +} + +static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + + /* + * To avoid any stray glitches while abruptly powering down the PLL + * make sure to gate the clock using the clock enable bit before + * powering down the PLL + */ + dsi_pll_disable_global_clk(pll_10nm); + pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0); + dsi_pll_disable_sub(pll_10nm); + if (pll_10nm->slave) { + dsi_pll_disable_global_clk(pll_10nm->slave); + dsi_pll_disable_sub(pll_10nm->slave); + } + /* flush, ensure all register writes are done */ + wmb(); + pll->pll_on = false; +} + static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct msm_dsi_pll *pll = hw_clk_to_pll(hw); struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + void __iomem *base = pll_10nm->mmio; + u64 ref_clk = pll_10nm->vco_ref_clk_rate; u64 vco_rate = 0x0; - - DBG("DSI PLL%d returning vco rate = %lu", pll_10nm->id, - (unsigned long)vco_rate); + u64 multiplier; + u32 frac; + u32 dec; + u64 pll_freq, tmp64; + + dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1); + dec &= 0xff; + + frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1); + frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) & + 0xff) << 8); + frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) & + 0x3) << 16); + + /* + * TODO: + * 1. Assumes prescaler is disabled + * 2. Multiplier is 2^18. 
it should be 2^(num_of_frac_bits) + */ + multiplier = 1 << 18; + pll_freq = dec * (ref_clk * 2); + tmp64 = (ref_clk * 2 * frac); + pll_freq += div_u64(tmp64, multiplier); + + vco_rate = pll_freq; + + DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x", + pll_10nm->id, (unsigned long)vco_rate, dec, frac); return (unsigned long)vco_rate; } @@ -62,8 +509,8 @@ static const struct clk_ops clk_ops_dsi_pll_10nm_vco = { .round_rate = msm_dsi_pll_helper_clk_round_rate, .set_rate = dsi_pll_10nm_vco_set_rate, .recalc_rate = dsi_pll_10nm_vco_recalc_rate, - .prepare = msm_dsi_pll_helper_clk_prepare, - .unprepare = msm_dsi_pll_helper_clk_unprepare, + .prepare = dsi_pll_10nm_vco_prepare, + .unprepare = dsi_pll_10nm_vco_unprepare, }; /* @@ -73,13 +520,45 @@ static const struct clk_ops clk_ops_dsi_pll_10nm_vco = { static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll) { struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + struct pll_10nm_cached_state *cached = &pll_10nm->cached_state; + void __iomem *phy_base = pll_10nm->phy_cmn_mmio; + u32 cmn_clk_cfg0, cmn_clk_cfg1; - DBG("DSI PLL%d", pll_10nm->id); + cached->pll_out_div = pll_read(pll_10nm->mmio + + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); + cached->pll_out_div &= 0x3; + + cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0); + cached->bit_clk_div = cmn_clk_cfg0 & 0xf; + cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4; + + cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + cached->pll_mux = cmn_clk_cfg1 & 0x3; + + DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x", + pll_10nm->id, cached->pll_out_div, cached->bit_clk_div, + cached->pix_clk_div, cached->pll_mux); } static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll) { struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + struct pll_10nm_cached_state *cached = &pll_10nm->cached_state; + void __iomem *phy_base = pll_10nm->phy_cmn_mmio; + u32 val; + + val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); + val &= ~0x3; + val |= cached->pll_out_div; + pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val); + + pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0, + cached->bit_clk_div | (cached->pix_clk_div << 4)); + + val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + val &= ~0x3; + val |= cached->pll_mux; + pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val); DBG("DSI PLL%d", pll_10nm->id); @@ -90,9 +569,29 @@ static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll, enum msm_dsi_phy_usecase uc) { struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + void __iomem *base = pll_10nm->phy_cmn_mmio; + u32 data = 0x0; /* internal PLL */ DBG("DSI PLL%d", pll_10nm->id); + switch (uc) { + case MSM_DSI_PHY_STANDALONE: + break; + case MSM_DSI_PHY_MASTER: + pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX]; + break; + case MSM_DSI_PHY_SLAVE: + data = 0x1; /* external PLL */ + break; + default: + return -EINVAL; + } + + /* set PLL src */ + pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2)); + + pll_10nm->uc = uc; + return 0; } @@ -101,13 +600,14 @@ static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll, struct clk **pixel_clk_provider) { struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data; DBG("DSI PLL%d", pll_10nm->id); if (byte_clk_provider) - *byte_clk_provider = NULL; + *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk; if (pixel_clk_provider) - *pixel_clk_provider = NULL; + *pixel_clk_provider = 
hw_data->hws[DSI_PIXEL_PLL_CLK]->clk; return 0; } @@ -119,8 +619,151 @@ static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll) DBG("DSI PLL%d", pll_10nm->id); } +/* + * The post dividers and mux clocks are created using the standard divider and + * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux + * state to follow the master PLL's divider/mux state. Therefore, we don't + * require special clock ops that also configure the slave PLL registers + */ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm) { + char clk_name[32], parent[32], vco_name[32]; + char parent2[32], parent3[32], parent4[32]; + struct clk_init_data vco_init = { + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .name = vco_name, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_10nm_vco, + }; + struct device *dev = &pll_10nm->pdev->dev; + struct clk_hw **hws = pll_10nm->hws; + struct clk_hw_onecell_data *hw_data; + struct clk_hw *hw; + int num = 0; + int ret; + + DBG("DSI%d", pll_10nm->id); + + hw_data = devm_kzalloc(dev, sizeof(*hw_data) + + NUM_PROVIDED_CLKS * sizeof(struct clk_hw *), + GFP_KERNEL); + if (!hw_data) + return -ENOMEM; + + snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id); + pll_10nm->base.clk_hw.init = &vco_init; + + ret = clk_hw_register(dev, &pll_10nm->base.clk_hw); + if (ret) + return ret; + + hws[num++] = &pll_10nm->base.clk_hw; + + snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); + snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id); + + hw = clk_hw_register_divider(dev, clk_name, + parent, CLK_SET_RATE_PARENT, + pll_10nm->mmio + + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, + 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); + + /* BIT CLK: DIV_CTRL_3_0 */ + hw = clk_hw_register_divider(dev, clk_name, parent, + CLK_SET_RATE_PARENT, + pll_10nm->phy_cmn_mmio + + REG_DSI_10nm_PHY_CMN_CLK_CFG0, + 0, 4, CLK_DIVIDER_ONE_BASED, + &pll_10nm->postdiv_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id); + + /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + CLK_SET_RATE_PARENT, 1, 8); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + hw_data->hws[DSI_BYTE_PLL_CLK] = hw; + + snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id); + + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + 0, 1, 2); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); + + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + 0, 1, 4); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id); + snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id); + snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); + snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id); + + hw = clk_hw_register_mux(dev, clk_name, + (const char *[]){ + parent, parent2, parent3, parent4 + }, 4, 0, pll_10nm->phy_cmn_mmio + + REG_DSI_10nm_PHY_CMN_CLK_CFG1, 
+ 0, 2, 0, NULL); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id); + + /* PIX CLK DIV : DIV_CTRL_7_4*/ + hw = clk_hw_register_divider(dev, clk_name, parent, + 0, pll_10nm->phy_cmn_mmio + + REG_DSI_10nm_PHY_CMN_CLK_CFG0, + 4, 4, CLK_DIVIDER_ONE_BASED, + &pll_10nm->postdiv_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + hw_data->hws[DSI_PIXEL_PLL_CLK] = hw; + + pll_10nm->num_hws = num; + + hw_data->num = NUM_PROVIDED_CLKS; + pll_10nm->hw_data = hw_data; + + ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, + pll_10nm->hw_data); + if (ret) { + dev_err(dev, "failed to register clk provider: %d\n", ret); + return ret; + } + return 0; } @@ -172,5 +815,8 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id) return ERR_PTR(ret); } + /* TODO: Remove this when we have proper display handover support */ + msm_dsi_pll_save_state(pll); + return pll; } -- cgit v1.2.3 From ff73ff19406098f71ec7628b951e0765f1df8128 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 11:35:27 +0530 Subject: drm/msm/dsi: Populate the 10nm PHY funcs Populate the PHY ops with the downstream driver as reference. There are a couple of TODOs which need to be resolved: - The PHY timings are all hardcoded for now. This needs to be replaced with automatic calculations once we get/understand them. - There are some lane configuration registers which use a new representation between physical and logical lane mappings. For now, we've hardcoced them to follow the default mapping (i.e logical 0 -> phy 0, logical 1 -> phy 1 etc). Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 199 +++++++++++++++++++++++++++++ 1 file changed, 199 insertions(+) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c index b7545fb63bf5..0af951aaeea1 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -8,9 +8,208 @@ #include "dsi_phy.h" #include "dsi.xml.h" +static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->base; + u32 data = 0; + + data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL); + mb(); /* make sure read happened */ + + return (data & BIT(0)); +} + +static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable) +{ + void __iomem *lane_base = phy->lane_base; + int phy_lane_0 = 0; /* TODO: Support all lane swap configs */ + + /* + * LPRX and CDRX need to enabled only for physical data lane + * corresponding to the logical data lane 0 + */ + if (enable) + dsi_phy_write(lane_base + + REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3); + else + dsi_phy_write(lane_base + + REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0); +} + +static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy) +{ + int i; + u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 }; + void __iomem *lane_base = phy->lane_base; + + /* Strength ctrl settings */ + for (i = 0; i < 5; i++) { + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i), + 0x55); + /* + * Disable LPRX and CDRX for all lanes. 
And later on, it will + * be only enabled for the physical data lane corresponding + * to the logical data lane 0 + */ + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i), + 0x88); + } + + dsi_phy_hw_v3_0_config_lpcdrx(phy, true); + + /* other settings */ + for (i = 0; i < 5; i++) { + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i), + i == 4 ? 0x80 : 0x0); + dsi_phy_write(lane_base + + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0); + dsi_phy_write(lane_base + + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i), + tx_dctrl[i]); + } + + /* Toggle BIT 0 to release freeze I/0 */ + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04); +} + +static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + /* + * TODO: These params need to be computed, they're currently hardcoded + * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a + * default escape clock of 19.2 Mhz. + */ + + timing->hs_halfbyte_en = 0; + timing->clk_zero = 0x1c; + timing->clk_prepare = 0x07; + timing->clk_trail = 0x07; + timing->hs_exit = 0x23; + timing->hs_zero = 0x21; + timing->hs_prepare = 0x07; + timing->hs_trail = 0x07; + timing->hs_rqst = 0x05; + timing->ta_sure = 0x00; + timing->ta_go = 0x03; + timing->ta_get = 0x04; + + timing->shared_timings.clk_pre = 0x2d; + timing->shared_timings.clk_post = 0x0d; + + return 0; +} + static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, struct msm_dsi_phy_clk_request *clk_req) { + int ret; + u32 status; + u32 const delay_us = 5; + u32 const timeout_us = 1000; + struct msm_dsi_dphy_timing *timing = &phy->timing; + void __iomem *base = phy->base; + u32 data; + + DBG(""); + + if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) { + dev_err(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + if (dsi_phy_hw_v3_0_is_pll_on(phy)) + pr_warn("PLL turned on before configuring PHY\n"); + + /* wait for REFGEN READY */ + ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS, + status, (status & BIT(0)), + delay_us, timeout_us); + if (ret) { + pr_err("Ref gen not ready. 
Aborting\n"); + return -EINVAL; + } + + /* de-assert digital and pll power down */ + data = BIT(6) | BIT(5); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data); + + /* Assert PLL core reset */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00); + + /* turn off resync FIFO */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00); + + /* Select MS1 byte-clk */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10); + + /* Enable LDO */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59); + + /* Configure PHY lane swap (TODO: we need to calculate this) */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84); + + /* DSI PHY timings */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0, + timing->hs_halfbyte_en); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1, + timing->clk_zero); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2, + timing->clk_prepare); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3, + timing->clk_trail); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4, + timing->hs_exit); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5, + timing->hs_zero); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6, + timing->hs_prepare); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7, + timing->hs_trail); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8, + timing->hs_rqst); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9, + timing->ta_go | (timing->ta_sure << 3)); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10, + timing->ta_get); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11, + 0x00); + + /* Remove power down from all blocks */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f); + + /* power up lanes */ + data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0); + + /* TODO: only power up lanes that are used */ + data |= 0x1F; + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F); + + /* Select full-rate mode */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40); + + ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase); + if (ret) { + dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n", + __func__, ret); + return ret; + } + + /* DSI lane settings */ + dsi_phy_hw_v3_0_lane_settings(phy); + + DBG("DSI%d PHY enabled", phy->id); + return 0; } -- cgit v1.2.3 From 29a1157ceba2bf885479d6dcd2933a6b0778266b Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 15:04:42 +0530 Subject: drm/msm/dsi: Use msm_clk_get in dsi_get_config We try to get the interface clock in dsi_get_config early during DSI's component bind. Try getting both the "iface" and "iface_clk" clock name variants so that we are compatible with both new and legacy DT. 
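For reference, the name fallback that makes this work looks roughly like the sketch below; the helper name and the exact legacy-suffix handling are illustrative assumptions rather than code from this series.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* Sketch of a clock lookup with a legacy-name fallback, in the spirit of
 * msm_clk_get(): try the new short name first ("iface"), then retry with
 * the old "_clk" suffix ("iface_clk") so both DT generations keep working.
 */
static struct clk *example_clk_get(struct platform_device *pdev, const char *name)
{
        char legacy[32];
        struct clk *clk;

        clk = devm_clk_get(&pdev->dev, name);
        if (!IS_ERR(clk))
                return clk;

        snprintf(legacy, sizeof(legacy), "%s_clk", name);
        return devm_clk_get(&pdev->dev, legacy);
}
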
Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi_host.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 0f7324a686ca..7611fe014036 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -214,7 +214,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config( goto exit; } - ahb_clk = clk_get(dev, "iface_clk"); + ahb_clk = msm_clk_get(msm_host->pdev, "iface"); if (IS_ERR(ahb_clk)) { pr_err("%s: cannot get interface clock\n", __func__); goto put_gdsc; @@ -225,7 +225,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config( ret = regulator_enable(gdsc_reg); if (ret) { pr_err("%s: unable to enable gdsc\n", __func__); - goto put_clk; + goto put_gdsc; } ret = clk_prepare_enable(ahb_clk); @@ -249,8 +249,6 @@ disable_clks: disable_gdsc: regulator_disable(gdsc_reg); pm_runtime_put_sync(dev); -put_clk: - clk_put(ahb_clk); put_gdsc: regulator_put(gdsc_reg); exit: -- cgit v1.2.3 From 02f7a6ca1692ffe1012abd512b8a88ba9a925095 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 15:04:43 +0530 Subject: drm/msm/dsi: Add SDM845 in dsi_cfg SDM845 contains 2 DSI6G v2.2.1 host controllers. Add them in dsi_cfg. Cc: Jordan Crouse Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi_cfg.c | 19 +++++++++++++++++++ drivers/gpu/drm/msm/dsi/dsi_cfg.h | 1 + 2 files changed, 20 insertions(+) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 65c1dfbbe019..0327bb54b01b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -118,6 +118,24 @@ static const struct msm_dsi_config msm8996_dsi_cfg = { .num_dsi = 2, }; +static const char * const dsi_sdm845_bus_clk_names[] = { + "iface", "bus", +}; + +static const struct msm_dsi_config sdm845_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 1, + .regs = { + {"vdda", 21800, 4 }, /* 1.2 V */ + }, + }, + .bus_clk_names = dsi_sdm845_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names), + .io_start = { 0xae94000, 0xae96000 }, + .num_dsi = 2, +}; + static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, @@ -131,6 +149,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg}, }; const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index 00a5da2663c6..9cfdcf1c95d5 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -25,6 +25,7 @@ #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 #define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 +#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 #define MSM_DSI_V2_VER_MINOR_8064 0x0 -- cgit v1.2.3 From c1d97083cd48a2b3f4382f0122889d1d73661b2e Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 15:04:44 +0530 Subject: drm/msm/dsi: Add byte_intf_clk DSI6G v2.0+ blocks have a new clock input to them called byte_intf_clk. 
It's rate is to be set as byte_clk / 2. Within the clock controller (CC) subsystem, this clock is a child/descendant of the byte_clk. Set it up as an optional clock in the DSI host driver. Make sure that we enable/set its rate only after we configure byte_clk. This is required for the ancestor clocks in the CC to be configured correctly. Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi_host.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 7611fe014036..f675975c2655 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -115,6 +115,7 @@ struct msm_dsi_host { struct clk *pixel_clk; struct clk *byte_clk_src; struct clk *pixel_clk_src; + struct clk *byte_intf_clk; u32 byte_clk_rate; u32 esc_clk_rate; @@ -377,6 +378,14 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) goto exit; } + msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf"); + if (IS_ERR(msm_host->byte_intf_clk)) { + ret = PTR_ERR(msm_host->byte_intf_clk); + pr_debug("%s: can't find byte_intf clock. ret=%d\n", + __func__, ret); + msm_host->byte_intf_clk = NULL; + } + msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk); if (!msm_host->byte_clk_src) { ret = -ENODEV; @@ -502,6 +511,16 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) goto error; } + if (msm_host->byte_intf_clk) { + ret = clk_set_rate(msm_host->byte_intf_clk, + msm_host->byte_clk_rate / 2); + if (ret) { + pr_err("%s: Failed to set rate byte intf clk, %d\n", + __func__, ret); + goto error; + } + } + ret = clk_prepare_enable(msm_host->esc_clk); if (ret) { pr_err("%s: Failed to enable dsi esc clk\n", __func__); @@ -520,8 +539,19 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) goto pixel_clk_err; } + if (msm_host->byte_intf_clk) { + ret = clk_prepare_enable(msm_host->byte_intf_clk); + if (ret) { + pr_err("%s: Failed to enable byte intf clk\n", + __func__); + goto byte_intf_clk_err; + } + } + return 0; +byte_intf_clk_err: + clk_disable_unprepare(msm_host->pixel_clk); pixel_clk_err: clk_disable_unprepare(msm_host->byte_clk); byte_clk_err: @@ -615,6 +645,8 @@ static void dsi_link_clk_disable(struct msm_dsi_host *msm_host) if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { clk_disable_unprepare(msm_host->esc_clk); clk_disable_unprepare(msm_host->pixel_clk); + if (msm_host->byte_intf_clk) + clk_disable_unprepare(msm_host->byte_intf_clk); clk_disable_unprepare(msm_host->byte_clk); } else { clk_disable_unprepare(msm_host->pixel_clk); -- cgit v1.2.3 From 45be9dc52eb4b9ec6f98da9263c8fcb39fbd86b3 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 15:04:45 +0530 Subject: dt-bindings: display: msm/dsi: Remove unused properties "qcom,dsi-host-index" and "qcom,dsi-phy-index" DT props aren't acceptable and have never been used in any DT files. Remove them. 
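These properties can go because the driver is able to work the instance out on its own; a minimal sketch of that idea (function and parameter names below are made up for illustration, not taken from this series) is to match the controller's register base against the per-SoC list of known DSI base addresses:

#include <linux/errno.h>
#include <linux/ioport.h>

/* Illustrative only: recover the DSI instance id from the "reg" base
 * address instead of reading a qcom,dsi-host-index property.
 */
static int example_dsi_get_id(const struct resource *res,
                              const resource_size_t *io_start, int num_dsi)
{
        int i;

        for (i = 0; i < num_dsi; i++)
                if (io_start[i] == res->start)
                        return i;

        return -EINVAL; /* base address not known for this SoC */
}
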
Cc: Rob Herring Cc: devicetree@vger.kernel.org Signed-off-by: Archit Taneja Reviewed-by: Rob Herring Signed-off-by: Rob Clark --- Documentation/devicetree/bindings/display/msm/dsi.txt | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt index a6671bd2c85a..457c688736be 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi.txt +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt @@ -7,8 +7,6 @@ Required properties: - reg: Physical base address and length of the registers of controller - reg-names: The names of register regions. The following regions are required: * "dsi_ctrl" -- qcom,dsi-host-index: The ID of DSI controller hardware instance. This should - be 0 or 1, since we have 2 DSI controllers at most for now. - interrupts: The interrupt signal from the DSI block. - power-domains: Should be <&mmcc MDSS_GDSC>. - clocks: Phandles to device clocks. @@ -96,8 +94,6 @@ Required properties: * "dsi_phy_regulator" - clock-cells: Must be 1. The DSI PHY block acts as a clock provider, creating 2 clocks: A byte clock (index 0), and a pixel clock (index 1). -- qcom,dsi-phy-index: The ID of DSI PHY hardware instance. This should - be 0 or 1, since we have 2 DSI PHYs at most for now. - power-domains: Should be <&mmcc MDSS_GDSC>. - clocks: Phandles to device clocks. See [1] for details on clock bindings. - clock-names: the following clocks are required: -- cgit v1.2.3 From 8c4905fd4939c59e0f7993ba34883e328eef4b59 Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 15:04:46 +0530 Subject: dt-bindings: display: msm/dsi: Fix the PHY regulator supply props The PHY regulator supply names vary across different PHY versions. Mention explicitly which PHYs require which supplies. Cc: Rob Herring Cc: devicetree@vger.kernel.org Signed-off-by: Archit Taneja Reviewed-by: Rob Herring Signed-off-by: Rob Clark --- Documentation/devicetree/bindings/display/msm/dsi.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt index 457c688736be..9c3ad6bbb9f0 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi.txt +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt @@ -98,7 +98,11 @@ Required properties: - clocks: Phandles to device clocks. See [1] for details on clock bindings. - clock-names: the following clocks are required: * "iface" + For 28nm HPM/LP, 28nm 8960 PHYs: - vddio-supply: phandle to vdd-io regulator device node + For 20nm PHY: +- vddio-supply: phandle to vdd-io regulator device node +- vcca-supply: phandle to vcca regulator device node Optional properties: - qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY -- cgit v1.2.3 From 31767e00e428c891343f94e5a94909bb7a642bcf Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 15:04:47 +0530 Subject: dt-bindings: display: msm/dsi: Add compatible for 14nm DSI PHY Add the compatible string for 14nm DSI PHY (used in MSM8996/APQ8096). >From 14nm PHY onwards, the "dsi_phy_regulator" reg-name is not required, but "dsi_phy_lane" reg-name is. Update the doc to specify the reg-names each PHY revision needs. 
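As a rough illustration of what the new reg-name implies on the driver side (the names below are assumptions, not code from this series), a 14nm-style PHY probe maps the lane region by name instead of the regulator region used by older PHYs:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch: map the "dsi_phy_lane" region that 14nm (and later) PHYs describe
 * in DT, in place of the "dsi_phy_regulator" region of earlier revisions.
 */
static void __iomem *example_map_lane_base(struct platform_device *pdev)
{
        struct resource *res;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy_lane");
        if (!res)
                return IOMEM_ERR_PTR(-ENODEV);

        return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}
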
Cc: Rob Herring Cc: devicetree@vger.kernel.org Signed-off-by: Archit Taneja Reviewed-by: Rob Herring Signed-off-by: Rob Clark --- Documentation/devicetree/bindings/display/msm/dsi.txt | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt index 9c3ad6bbb9f0..26a1796b7145 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi.txt +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt @@ -86,12 +86,19 @@ Required properties: * "qcom,dsi-phy-28nm-lp" * "qcom,dsi-phy-20nm" * "qcom,dsi-phy-28nm-8960" -- reg: Physical base address and length of the registers of PLL, PHY and PHY - regulator + * "qcom,dsi-phy-14nm" +- reg: Physical base address and length of the registers of PLL, PHY. Some + revisions require the PHY regulator base address, whereas others require the + PHY lane base address. See below for each PHY revision. - reg-names: The names of register regions. The following regions are required: + For DSI 28nm HPM/LP/8960 PHYs and 20nm PHY: * "dsi_pll" * "dsi_phy" * "dsi_phy_regulator" + For DSI 14nm PHY: + * "dsi_pll" + * "dsi_phy" + * "dsi_phy_lane" - clock-cells: Must be 1. The DSI PHY block acts as a clock provider, creating 2 clocks: A byte clock (index 0), and a pixel clock (index 1). - power-domains: Should be <&mmcc MDSS_GDSC>. @@ -102,6 +109,8 @@ Required properties: - vddio-supply: phandle to vdd-io regulator device node For 20nm PHY: - vddio-supply: phandle to vdd-io regulator device node +- vcca-supply: phandle to vcca regulator device node + For 14nm PHY: - vcca-supply: phandle to vcca regulator device node Optional properties: -- cgit v1.2.3 From 35f135a3b1cfeee4ef2bd92755debd0bcf60cb9f Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Wed, 17 Jan 2018 15:04:48 +0530 Subject: dt-bindings: display: msm/dsi: Add updates for SDM845 SDM845 uses a newer revision (v2.0+) of the 6G DSI controller. This revision has another clock input at the block boundary called the byte interface clock. Specify this new clock in the binding. A 10nm DSI PHY is used along with the controller. Add a compatible string for it and specify its base address/regulator supply needs. Cc: Rob Herring Cc: devicetree@vger.kernel.org Signed-off-by: Archit Taneja Reviewed-by: Rob Herring Signed-off-by: Rob Clark --- Documentation/devicetree/bindings/display/msm/dsi.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt index 26a1796b7145..518e9cdf0d4b 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi.txt +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt @@ -20,6 +20,8 @@ Required properties: * "core" For DSIv2, we need an additional clock: * "src" + For DSI6G v2.0 onwards, we need also need the clock: + * "byte_intf" - assigned-clocks: Parents of "byte" and "pixel" for the given platform. - assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided by a DSI PHY block. See [1] for details on clock bindings. @@ -87,6 +89,7 @@ Required properties: * "qcom,dsi-phy-20nm" * "qcom,dsi-phy-28nm-8960" * "qcom,dsi-phy-14nm" + * "qcom,dsi-phy-10nm" - reg: Physical base address and length of the registers of PLL, PHY. Some revisions require the PHY regulator base address, whereas others require the PHY lane base address. See below for each PHY revision. 
@@ -95,7 +98,7 @@ Required properties: * "dsi_pll" * "dsi_phy" * "dsi_phy_regulator" - For DSI 14nm PHY: + For DSI 14nm and 10nm PHYs: * "dsi_pll" * "dsi_phy" * "dsi_phy_lane" @@ -112,6 +115,8 @@ Required properties: - vcca-supply: phandle to vcca regulator device node For 14nm PHY: - vcca-supply: phandle to vcca regulator device node + For 10nm PHY: +- vdds-supply: phandle to vdds regulator device node Optional properties: - qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY -- cgit v1.2.3 From 52a8988de97f5e7370d15261e81613779e7f057d Mon Sep 17 00:00:00 2001 From: Luis de Bethencourt Date: Wed, 17 Jan 2018 18:55:47 +0000 Subject: drm/msm/mdp5: Fix trailing semicolon The trailing semicolon is an empty statement that does no operation. Removing it since it doesn't do anything. Signed-off-by: Luis de Bethencourt Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 3e9bba4d6624..6d8e3a9a6fc0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) } else { dev_info(&pdev->dev, "no iommu, fallback to phys contig buffers for scanout\n"); - aspace = NULL;; + aspace = NULL; } pm_runtime_put_sync(&pdev->dev); -- cgit v1.2.3 From cccb9723821806b5ec167d7910c6e82622f4a305 Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Mon, 22 Jan 2018 08:34:05 +0100 Subject: drm/msm/hdmi: fix semicolon.cocci warnings Remove unneeded semicolon. Generated by: scripts/coccinelle/misc/semicolon.cocci CC: Laurent Pinchart Signed-off-by: Fengguang Wu Signed-off-by: Julia Lawall Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c index 6e767979aab3..3656155e3793 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c @@ -769,7 +769,7 @@ static int msm_hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctr if (rc) { pr_err("%s: wait key and an ready failed\n", __func__); return rc; - }; + } /* Read BCAPS and send to HDCP engine */ rc = msm_hdmi_hdcp_recv_bcaps(hdcp_ctrl); -- cgit v1.2.3 From dc9a9b32053efea0a2610be98814519ec59570b4 Mon Sep 17 00:00:00 2001 From: Steve Kowalik Date: Fri, 26 Jan 2018 14:55:54 +1100 Subject: drm/msm: Replace gem_object deprecated functions drm_gem_object_{reference,unreference,unreference_unlocked} are deprecated functions, and merely alias to the get/put functions. Switch to the new names. 
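The conversion is purely mechanical: the deprecated names were thin aliases of the get/put helpers, so each call maps one-to-one. A simplified sketch of that mapping (illustrative wrappers, not the in-tree definitions):

#include <drm/drm_gem.h>

/* One-to-one mapping applied throughout this patch: */
static inline void example_gem_ref(struct drm_gem_object *obj)
{
        drm_gem_object_get(obj);            /* was drm_gem_object_reference() */
}

static inline void example_gem_unref_unlocked(struct drm_gem_object *obj)
{
        drm_gem_object_put_unlocked(obj);   /* was drm_gem_object_unreference_unlocked() */
}
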
Signed-off-by: Steve Kowalik Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 6 +++--- drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 6 +++--- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 2 +- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 2 +- drivers/gpu/drm/msm/msm_drv.c | 8 ++++---- drivers/gpu/drm/msm/msm_fb.c | 6 +++--- drivers/gpu/drm/msm/msm_gem.c | 12 ++++++------ drivers/gpu/drm/msm/msm_gpu.c | 8 ++++---- drivers/gpu/drm/msm/msm_ringbuffer.c | 2 +- 9 files changed, 26 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index fa08b4897a56..795fe11a9371 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -858,19 +858,19 @@ static void a5xx_destroy(struct msm_gpu *gpu) if (a5xx_gpu->pm4_bo) { if (a5xx_gpu->pm4_iova) msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); - drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo); + drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo); } if (a5xx_gpu->pfp_bo) { if (a5xx_gpu->pfp_iova) msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace); - drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo); + drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo); } if (a5xx_gpu->gpmu_bo) { if (a5xx_gpu->gpmu_iova) msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace); - drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); + drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo); } adreno_gpu_cleanup(adreno_gpu); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 14bd3bd3e040..6e5e1aa54ce1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -129,7 +129,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val) struct msm_kms *kms = &mdp4_kms->base.base; msm_gem_put_iova(val, kms->aspace); - drm_gem_object_unreference_unlocked(val); + drm_gem_object_put_unlocked(val); } static void mdp4_crtc_destroy(struct drm_crtc *crtc) @@ -382,7 +382,7 @@ static void update_cursor(struct drm_crtc *crtc) if (next_bo) { /* take a obj ref + iova ref when we start scanning out: */ - drm_gem_object_reference(next_bo); + drm_gem_object_get(next_bo); msm_gem_get_iova(next_bo, kms->aspace, &iova); /* enable cursor: */ @@ -467,7 +467,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, return 0; fail: - drm_gem_object_unreference_unlocked(cursor_bo); + drm_gem_object_put_unlocked(cursor_bo); return ret; } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index f7f087419ed8..4b646bf9c214 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -164,7 +164,7 @@ static void mdp4_destroy(struct msm_kms *kms) if (mdp4_kms->blank_cursor_iova) msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace); - drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo); + drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo); if (aspace) { aspace->mmu->funcs->detach(aspace->mmu, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index e414850dbbda..8c5ed0b59e46 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -170,7 +170,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val) struct msm_kms *kms = &mdp5_kms->base.base; msm_gem_put_iova(val, kms->aspace); - drm_gem_object_unreference_unlocked(val); + drm_gem_object_put_unlocked(val); } static void 
mdp5_crtc_destroy(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index d90ef1d78a1b..30cd514d8f7c 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -660,7 +660,7 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, ret = msm_gem_cpu_prep(obj, args->op, &timeout); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -678,7 +678,7 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, ret = msm_gem_cpu_fini(obj); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -718,7 +718,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, args->offset = msm_gem_mmap_offset(obj); } - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -783,7 +783,7 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, ret = 0; } - drm_gem_object_unreference(obj); + drm_gem_object_put(obj); unlock: mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index fc175e724ad6..0e0c87252ab0 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -53,7 +53,7 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb) for (i = 0; i < n; i++) { struct drm_gem_object *bo = msm_fb->planes[i]; - drm_gem_object_unreference_unlocked(bo); + drm_gem_object_put_unlocked(bo); } kfree(msm_fb); @@ -160,7 +160,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, out_unref: for (i = 0; i < n; i++) - drm_gem_object_unreference_unlocked(bos[i]); + drm_gem_object_put_unlocked(bos[i]); return ERR_PTR(ret); } @@ -274,7 +274,7 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format /* note: if fb creation failed, we can't rely on fb destroy * to unref the bo: */ - drm_gem_object_unreference_unlocked(bo); + drm_gem_object_put_unlocked(bo); return ERR_CAST(fb); } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 07376de9ff4c..0e5073af3913 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -470,7 +470,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, *offset = msm_gem_mmap_offset(obj); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); fail: return ret; @@ -854,7 +854,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, ret = drm_gem_handle_create(file, obj, handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -974,7 +974,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, return obj; fail: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(ret); } @@ -1034,7 +1034,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, return obj; fail: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(ret); } @@ -1052,7 +1052,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, if (iova) { ret = msm_gem_get_iova(obj, aspace, iova); if (ret) { - drm_gem_object_unreference(obj); + drm_gem_object_put(obj); return ERR_PTR(ret); } } @@ -1060,7 +1060,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, vaddr = msm_gem_get_vaddr(obj); if 
(IS_ERR(vaddr)) { msm_gem_put_iova(obj, aspace); - drm_gem_object_unreference(obj); + drm_gem_object_put(obj); return ERR_CAST(vaddr); } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index bd376f9e18a7..8078e4d52fe0 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -552,7 +552,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) /* move to inactive: */ msm_gem_move_to_inactive(&msm_obj->base); msm_gem_put_iova(&msm_obj->base, gpu->aspace); - drm_gem_object_unreference(&msm_obj->base); + drm_gem_object_put(&msm_obj->base); } pm_runtime_mark_last_busy(&gpu->pdev->dev); @@ -634,7 +634,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu)); /* submit takes a reference to the bo and iova until retired: */ - drm_gem_object_reference(&msm_obj->base); + drm_gem_object_get(&msm_obj->base); msm_gem_get_iova(&msm_obj->base, submit->gpu->aspace, &iova); @@ -865,7 +865,7 @@ fail: if (gpu->memptrs_bo) { msm_gem_put_vaddr(gpu->memptrs_bo); msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); - drm_gem_object_unreference_unlocked(gpu->memptrs_bo); + drm_gem_object_put_unlocked(gpu->memptrs_bo); } platform_set_drvdata(pdev, NULL); @@ -888,7 +888,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) if (gpu->memptrs_bo) { msm_gem_put_vaddr(gpu->memptrs_bo); msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); - drm_gem_object_unreference_unlocked(gpu->memptrs_bo); + drm_gem_object_put_unlocked(gpu->memptrs_bo); } if (!IS_ERR_OR_NULL(gpu->aspace)) { diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 6ca98da35f63..6f5295b3f2f6 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -76,7 +76,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) if (ring->bo) { msm_gem_put_iova(ring->bo, ring->gpu->aspace); msm_gem_put_vaddr(ring->bo); - drm_gem_object_unreference_unlocked(ring->bo); + drm_gem_object_put_unlocked(ring->bo); } kfree(ring); } -- cgit v1.2.3 From 9d20a0e6a8f4edf37d75f3bca41f99f52a440c22 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Mon, 22 Jan 2018 11:10:45 -0700 Subject: drm/msm/gpu: Set number of clocks to 0 if the list allocation fails If we fail to allocate gpu->grp_clks reset the number of available clocks to zero to avoid referencing the missing array later. Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/msm_gpu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 8078e4d52fe0..1c09acfb4028 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -682,8 +682,10 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks, GFP_KERNEL); - if (!gpu->grp_clks) + if (!gpu->grp_clks) { + gpu->nr_clocks = 0; return -ENOMEM; + } of_property_for_each_string(dev->of_node, "clock-names", prop, name) { gpu->grp_clks[i] = get_clock(dev, name); -- cgit v1.2.3 From edf5ceac316a95539a0b063d60d03f3226046f10 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Mon, 22 Jan 2018 11:10:46 -0700 Subject: drm/msm: Pass the correct aperture end to drm_mm_init drm_mm_init() takes the start and length of the intended virtual memory address region but the msm code is passing the end of the region instead. 
That would work out if the region started at 0 but it doesn't so the top of the region sneaks above the 32 bit boundary which won't work because the driver doesn't support 64 bit addresses for the GPU yet. Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/msm_gem_vma.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index d34e331554f3..ffbec224551b 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -96,6 +96,8 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, const char *name) { struct msm_gem_address_space *aspace; + u64 size = domain->geometry.aperture_end - + domain->geometry.aperture_start; aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); if (!aspace) @@ -106,7 +108,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, aspace->mmu = msm_iommu_new(dev, domain); drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT), - (domain->geometry.aperture_end >> PAGE_SHIFT) - 1); + size >> PAGE_SHIFT); kref_init(&aspace->kref); -- cgit v1.2.3 From f306953fdb1145020dd2a838698792d686feb2e3 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Mon, 22 Jan 2018 11:10:47 -0700 Subject: drm/msm/adreno: Rename gpmufw to powerfw The power management device on the a5xx cores is known as the GPMU (Graphics Power Management Unit). On a6xx cores the device was expanded and renamed as the GMU (Graphics Management Unit). Rename the 'gpmufw' name struct adreno_info as 'powerfw' to avoid confusion. Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a5xx_power.c | 2 +- drivers/gpu/drm/msm/adreno/adreno_device.c | 2 +- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 4e4d965fd9ab..6630e6c0c8be 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -270,7 +270,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) return; /* Get the firmware */ - fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->gpmufw); + fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->powerfw); if (IS_ERR(fw)) { DRM_ERROR("%s: Could not get GPMU firmware. 
GPMU will not be active\n", gpu->name); diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 6263cb906b3c..d64ceeb0d6f0 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -84,7 +84,7 @@ static const struct adreno_info gpulist[] = { .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | ADRENO_QUIRK_FAULT_DETECT_MASK, .init = a5xx_gpu_init, - .gpmufw = "a530v3_gpmu.fw2", + .powerfw = "a530v3_gpmu.fw2", .zapfw = "a530_zap.mdt", }, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 8d3d0a924908..0a869bb8ee9d 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -73,7 +73,7 @@ struct adreno_info { uint32_t revn; const char *name; const char *pm4fw, *pfpfw; - const char *gpmufw; + const char *powerfw; uint32_t gmem; enum adreno_quirks quirks; struct msm_gpu *(*init)(struct drm_device *dev); -- cgit v1.2.3 From c5e3548c295ace44c2ec8c3af1c10e82bc47f9b3 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Thu, 1 Feb 2018 12:15:16 -0700 Subject: drm/msm/adreno: Define a list of firmware files to load per target The number and type of firmware files required differs for each target. Instead of using a fixed struct member for each possible firmware file use a generic list of files that should be loaded on boot. Use some semi-target specific enums to help each target find the appropriate firmware(s) that it needs to load. Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 8 +++--- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 8 +++--- drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 13 ++++----- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 8 +++--- drivers/gpu/drm/msm/adreno/a5xx_power.c | 26 +++++------------- drivers/gpu/drm/msm/adreno/adreno_device.c | 44 ++++++++++++++++++++---------- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 33 ++++++++++++---------- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 12 ++++++-- 8 files changed, 80 insertions(+), 72 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 4baef2738178..1dd84d3489ae 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -256,8 +256,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu) */ /* Load PM4: */ - ptr = (uint32_t *)(adreno_gpu->pm4->data); - len = adreno_gpu->pm4->size / 4; + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); + len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; DBG("loading PM4 ucode version: %x", ptr[1]); gpu_write(gpu, REG_AXXX_CP_DEBUG, @@ -268,8 +268,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]); /* Load PFP: */ - ptr = (uint32_t *)(adreno_gpu->pfp->data); - len = adreno_gpu->pfp->size / 4; + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data); + len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4; DBG("loading PFP ucode version: %x", ptr[5]); gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0); diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 8199a4b9f2fa..2884b1b1660c 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -274,16 +274,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu) return ret; /* Load PM4: */ - ptr = (uint32_t *)(adreno_gpu->pm4->data); - len = adreno_gpu->pm4->size / 4; + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); + len = 
adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; DBG("loading PM4 ucode version: %u", ptr[0]); gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0); for (i = 1; i < len; i++) gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]); /* Load PFP: */ - ptr = (uint32_t *)(adreno_gpu->pfp->data); - len = adreno_gpu->pfp->size / 4; + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data); + len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4; DBG("loading PFP ucode version: %u", ptr[0]); gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c index cef09780ef17..6b279414b9c0 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -123,15 +123,12 @@ reset_set(void *data, u64 val) mutex_lock(&dev->struct_mutex); - if (adreno_gpu->pm4) { - release_firmware(adreno_gpu->pm4); - adreno_gpu->pm4 = NULL; - } + release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]); + adreno_gpu->fw[ADRENO_FW_PM4] = NULL; + + release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]); + adreno_gpu->fw[ADRENO_FW_PFP] = NULL; - if (adreno_gpu->pfp) { - release_firmware(adreno_gpu->pfp); - adreno_gpu->pfp = NULL; - } if (a5xx_gpu->pm4_bo) { if (a5xx_gpu->pm4_iova) msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 795fe11a9371..517e19c3f9ed 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -523,8 +523,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu) int ret; if (!a5xx_gpu->pm4_bo) { - a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4, - &a5xx_gpu->pm4_iova); + a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, + adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova); if (IS_ERR(a5xx_gpu->pm4_bo)) { ret = PTR_ERR(a5xx_gpu->pm4_bo); @@ -536,8 +536,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu) } if (!a5xx_gpu->pfp_bo) { - a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp, - &a5xx_gpu->pfp_iova); + a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, + adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova); if (IS_ERR(a5xx_gpu->pfp_bo)) { ret = PTR_ERR(a5xx_gpu->pfp_bo); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 6630e6c0c8be..e9c0e56dbec0 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -261,7 +261,6 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); struct drm_device *drm = gpu->dev; - const struct firmware *fw; uint32_t dwords = 0, offset = 0, bosize; unsigned int *data, *ptr, *cmds; unsigned int cmds_size; @@ -269,15 +268,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) if (a5xx_gpu->gpmu_bo) return; - /* Get the firmware */ - fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->powerfw); - if (IS_ERR(fw)) { - DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n", - gpu->name); - return; - } - - data = (unsigned int *) fw->data; + data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data; /* * The first dword is the size of the remaining data in dwords. 
Use it @@ -285,12 +276,14 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) * the firmware that we read */ - if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2))) - goto out; + if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 || + (data[0] < 2) || (data[0] >= + (adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2))) + return; /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */ if (data[1] != 2) - goto out; + return; cmds = data + data[2] + 3; cmds_size = data[0] - data[2] - 2; @@ -325,8 +318,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) msm_gem_put_vaddr(a5xx_gpu->gpmu_bo); a5xx_gpu->gpmu_dwords = dwords; - goto out; - + return; err: if (a5xx_gpu->gpmu_iova) msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace); @@ -336,8 +328,4 @@ err: a5xx_gpu->gpmu_bo = NULL; a5xx_gpu->gpmu_iova = 0; a5xx_gpu->gpmu_dwords = 0; - -out: - /* No need to keep that firmware laying around anymore */ - release_firmware(fw); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index d64ceeb0d6f0..f07d3ec7d77b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -30,61 +30,75 @@ static const struct adreno_info gpulist[] = { .rev = ADRENO_REV(3, 0, 5, ANY_ID), .revn = 305, .name = "A305", - .pm4fw = "a300_pm4.fw", - .pfpfw = "a300_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a300_pm4.fw", + [ADRENO_FW_PFP] = "a300_pfp.fw", + }, .gmem = SZ_256K, .init = a3xx_gpu_init, }, { .rev = ADRENO_REV(3, 0, 6, 0), .revn = 307, /* because a305c is revn==306 */ .name = "A306", - .pm4fw = "a300_pm4.fw", - .pfpfw = "a300_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a300_pm4.fw", + [ADRENO_FW_PFP] = "a300_pfp.fw", + }, .gmem = SZ_128K, .init = a3xx_gpu_init, }, { .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID), .revn = 320, .name = "A320", - .pm4fw = "a300_pm4.fw", - .pfpfw = "a300_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a300_pm4.fw", + [ADRENO_FW_PFP] = "a300_pfp.fw", + }, .gmem = SZ_512K, .init = a3xx_gpu_init, }, { .rev = ADRENO_REV(3, 3, 0, ANY_ID), .revn = 330, .name = "A330", - .pm4fw = "a330_pm4.fw", - .pfpfw = "a330_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a330_pm4.fw", + [ADRENO_FW_PFP] = "a330_pfp.fw", + }, .gmem = SZ_1M, .init = a3xx_gpu_init, }, { .rev = ADRENO_REV(4, 2, 0, ANY_ID), .revn = 420, .name = "A420", - .pm4fw = "a420_pm4.fw", - .pfpfw = "a420_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a420_pm4.fw", + [ADRENO_FW_PFP] = "a420_pfp.fw", + }, .gmem = (SZ_1M + SZ_512K), .init = a4xx_gpu_init, }, { .rev = ADRENO_REV(4, 3, 0, ANY_ID), .revn = 430, .name = "A430", - .pm4fw = "a420_pm4.fw", - .pfpfw = "a420_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a420_pm4.fw", + [ADRENO_FW_PFP] = "a420_pfp.fw", + }, .gmem = (SZ_1M + SZ_512K), .init = a4xx_gpu_init, }, { .rev = ADRENO_REV(5, 3, 0, 2), .revn = 530, .name = "A530", - .pm4fw = "a530_pm4.fw", - .pfpfw = "a530_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + [ADRENO_FW_GPMU] = "a530v3_gpmu.fw2", + }, .gmem = SZ_1M, .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | ADRENO_QUIRK_FAULT_DETECT_MASK, .init = a5xx_gpu_init, - .powerfw = "a530v3_gpmu.fw2", .zapfw = "a530_zap.mdt", }, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index de63ff26a062..4a8ee5ec571e 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -140,23 +140,24 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname) static int adreno_load_fw(struct adreno_gpu 
*adreno_gpu) { - const struct firmware *fw; + int i; - if (adreno_gpu->pm4) - return 0; + for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) { + const struct firmware *fw; - fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pm4fw); - if (IS_ERR(fw)) - return PTR_ERR(fw); - adreno_gpu->pm4 = fw; + if (!adreno_gpu->info->fw[i]) + continue; - fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pfpfw); - if (IS_ERR(fw)) { - release_firmware(adreno_gpu->pm4); - adreno_gpu->pm4 = NULL; - return PTR_ERR(fw); + /* Skip if the firmware has already been loaded */ + if (adreno_gpu->fw[i]) + continue; + + fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]); + if (IS_ERR(fw)) + return PTR_ERR(fw); + + adreno_gpu->fw[i] = fw; } - adreno_gpu->pfp = fw; return 0; } @@ -569,8 +570,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) { - release_firmware(adreno_gpu->pm4); - release_firmware(adreno_gpu->pfp); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) + release_firmware(adreno_gpu->fw[i]); msm_gpu_cleanup(&adreno_gpu->base); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 0a869bb8ee9d..499092af81a2 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -48,6 +48,13 @@ enum adreno_regs { REG_ADRENO_REGISTER_MAX, }; +enum { + ADRENO_FW_PM4 = 0, + ADRENO_FW_PFP = 1, + ADRENO_FW_GPMU = 2, + ADRENO_FW_MAX, +}; + enum adreno_quirks { ADRENO_QUIRK_TWO_PASS_USE_WFI = 1, ADRENO_QUIRK_FAULT_DETECT_MASK = 2, @@ -72,8 +79,7 @@ struct adreno_info { struct adreno_rev rev; uint32_t revn; const char *name; - const char *pm4fw, *pfpfw; - const char *powerfw; + const char *fw[ADRENO_FW_MAX]; uint32_t gmem; enum adreno_quirks quirks; struct msm_gpu *(*init)(struct drm_device *dev); @@ -115,7 +121,7 @@ struct adreno_gpu { } fwloc; /* firmware: */ - const struct firmware *pm4, *pfp; + const struct firmware *fw[ADRENO_FW_MAX]; /* * Register offsets are different between some GPUs. -- cgit v1.2.3 From 9de43e79c10149d29c77ff2c3dae048d1db9cbce Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Thu, 1 Feb 2018 12:15:17 -0700 Subject: drm/msm/adreno: Use generic function to load firmware to a buffer object Move a5xx specific code to load firmware into a buffer object to the generic Adreno code. This will come in useful for future targets. Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 23 ++--------------------- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 19 +++++++++++++++++++ drivers/gpu/drm/msm/adreno/adreno_gpu.h | 2 ++ 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 517e19c3f9ed..a4f68affc13b 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -497,25 +497,6 @@ static int a5xx_preempt_start(struct msm_gpu *gpu) return a5xx_idle(gpu, ring) ? 
0 : -EINVAL; } - -static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, - const struct firmware *fw, u64 *iova) -{ - struct drm_gem_object *bo; - void *ptr; - - ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4, - MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); - - if (IS_ERR(ptr)) - return ERR_CAST(ptr); - - memcpy(ptr, &fw->data[4], fw->size - 4); - - msm_gem_put_vaddr(bo); - return bo; -} - static int a5xx_ucode_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -523,7 +504,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu) int ret; if (!a5xx_gpu->pm4_bo) { - a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, + a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu, adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova); if (IS_ERR(a5xx_gpu->pm4_bo)) { @@ -536,7 +517,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu) } if (!a5xx_gpu->pfp_bo) { - a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, + a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu, adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova); if (IS_ERR(a5xx_gpu->pfp_bo)) { diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 4a8ee5ec571e..87133c6c6f91 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -162,6 +162,25 @@ static int adreno_load_fw(struct adreno_gpu *adreno_gpu) return 0; } +struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, + const struct firmware *fw, u64 *iova) +{ + struct drm_gem_object *bo; + void *ptr; + + ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4, + MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); + + if (IS_ERR(ptr)) + return ERR_CAST(ptr); + + memcpy(ptr, &fw->data[4], fw->size - 4); + + msm_gem_put_vaddr(bo); + + return bo; +} + int adreno_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 499092af81a2..d6b0e7b813f4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -206,6 +206,8 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu) int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname); +struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, + const struct firmware *fw, u64 *iova); int adreno_hw_init(struct msm_gpu *gpu); void adreno_recover(struct msm_gpu *gpu); void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, -- cgit v1.2.3 From fb48989edb628342af0fad478174ae30b2e1e23a Mon Sep 17 00:00:00 2001 From: Archit Taneja Date: Mon, 12 Feb 2018 12:01:07 +0530 Subject: drm/msm/dsi: Get byte_intf_clk only for versions that need it Newer DSI host controllers (SDM845 in particular) require a new clock called byte_intf_clk. A recent patch tried to add this as an optional clock, but it still set 'ret' to an error number if it didn't find it. This breaks the host's probe for all previous DSI host versions. Instead of setting this up as an optional clock, try to get the clock only for the DSI version that supports it. 
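The regression fixed here is easy to see in miniature; the sketch below (a hypothetical helper, not the driver code) shows how recording the error for an "optional" clock poisons the final return value and fails probe on hosts that simply do not have the clock:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Buggy pattern being removed: the clock is meant to be optional, but the
 * miss is remembered in 'ret' and returned at the end, so every host
 * predating the clock fails to probe.
 */
static int example_clk_init(struct platform_device *pdev, struct clk **byte_intf)
{
        int ret = 0;

        *byte_intf = devm_clk_get(&pdev->dev, "byte_intf");
        if (IS_ERR(*byte_intf)) {
                ret = PTR_ERR(*byte_intf);  /* bug: poisons the common no-clock case */
                *byte_intf = NULL;
        }

        /* ... the remaining, mandatory clocks are acquired here ... */

        return ret;                         /* older DSI hosts now return an error */
}

Gating the lookup on the controller version, as the patch below does, keeps the clock mandatory only where it actually exists.
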
Fixes: 56558fb ("drm/msm/dsi: Add byte_intf_clk") Signed-off-by: Archit Taneja Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi_host.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index f675975c2655..62ac614eccf9 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -378,11 +378,16 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) goto exit; } - msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf"); - if (IS_ERR(msm_host->byte_intf_clk)) { - ret = PTR_ERR(msm_host->byte_intf_clk); - pr_debug("%s: can't find byte_intf clock. ret=%d\n", - __func__, ret); + if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G && + cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_2_1) { + msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf"); + if (IS_ERR(msm_host->byte_intf_clk)) { + ret = PTR_ERR(msm_host->byte_intf_clk); + pr_err("%s: can't find byte_intf clock. ret=%d\n", + __func__, ret); + goto exit; + } + } else { msm_host->byte_intf_clk = NULL; } -- cgit v1.2.3 From 5abc7dd7b5a0f51a6c7d9cb3ce72b910ba3cef7b Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 2 Feb 2018 06:32:23 -0600 Subject: drm/msm/adreno/a5xx_debugfs: fix potential NULL pointer dereference _minor_ is being dereferenced before it is null checked, hence there is a potential null pointer dereference. Fix this by moving the pointer dereference after _minor_ has been null checked. Fixes: 024ad8df763f ("drm/msm: add a5xx specific debugfs") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c index 6b279414b9c0..059ec7d394d0 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -159,13 +159,15 @@ DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n"); int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) { - struct drm_device *dev = minor->dev; + struct drm_device *dev; struct dentry *ent; int ret; if (!minor) return 0; + dev = minor->dev; + ret = drm_debugfs_create_files(a5xx_debugfs_list, ARRAY_SIZE(a5xx_debugfs_list), minor->debugfs_root, minor); -- cgit v1.2.3 From 52749d601a6055da3352842575408fcba6f1da46 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 2 Feb 2018 06:42:33 -0600 Subject: drm/msm/dsi: Fix potential NULL pointer dereference in msm_dsi_modeset_init _dev_ is being dereferenced before it is null checked, hence there is a potential null pointer dereference. Fix this by moving the pointer dereference after _dev_ has been null checked. Fixes: d4e7f38d70ef ("drm/msm/dsi: check msm_dsi and dsi pointers before use") Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index ee7e090e27b4..b744bcc7d8ad 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -192,13 +192,14 @@ void __exit msm_dsi_unregister(void) int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, struct drm_encoder *encoder) { - struct msm_drm_private *priv = dev->dev_private; + struct msm_drm_private *priv; struct drm_bridge *ext_bridge; int ret; if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev)) return -EINVAL; + priv = dev->dev_private; msm_dsi->dev = dev; ret = msm_dsi_host_modeset_init(msm_dsi->host, dev); -- cgit v1.2.3 From 14be3200cd5f4ccece246f969c9c3645c3c12bd3 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 12 Feb 2018 08:18:27 -0500 Subject: drm/msm: rename mdp->disp Since new display controller is called "dpu" instead of "mdp". Lets make the name of the toplevel directory for the display controllers a bit more generic. Signed-off-by: Rob Clark Reviewed-by: Sean Paul --- drivers/gpu/drm/msm/Makefile | 46 +- drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h | 1174 ++++++++++++ drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 670 +++++++ drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c | 190 ++ drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c | 282 +++ drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c | 121 ++ drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 572 ++++++ drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h | 249 +++ drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c | 503 +++++ .../gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c | 135 ++ drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c | 172 ++ drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 419 +++++ drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h | 1968 ++++++++++++++++++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c | 652 +++++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h | 133 ++ drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c | 230 +++ drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 1194 ++++++++++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 779 ++++++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h | 86 + drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c | 445 +++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c | 136 ++ drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 1067 +++++++++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h | 325 ++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c | 282 +++ drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c | 172 ++ drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h | 47 + drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c | 176 ++ drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h | 57 + drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 1137 +++++++++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 411 ++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h | 98 + drivers/gpu/drm/msm/disp/mdp_common.xml.h | 104 ++ drivers/gpu/drm/msm/disp/mdp_format.c | 191 ++ drivers/gpu/drm/msm/disp/mdp_kms.c | 149 ++ drivers/gpu/drm/msm/disp/mdp_kms.h | 148 ++ drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 1174 ------------ drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 670 ------- drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c | 190 -- drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c | 282 --- drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | 121 -- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 572 ------ drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 249 --- drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 503 ----- drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c | 135 -- 
drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c | 172 -- drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 419 ----- drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 1968 -------------------- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 652 ------- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 133 -- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 230 --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 1194 ------------ drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 779 -------- drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 86 - drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 445 ----- drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 136 -- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 1067 ----------- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 325 ---- drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c | 282 --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c | 172 -- drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h | 47 - drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c | 176 -- drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h | 57 - drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 1137 ----------- drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 411 ---- drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | 98 - drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 104 -- drivers/gpu/drm/msm/mdp/mdp_format.c | 191 -- drivers/gpu/drm/msm/mdp/mdp_kms.c | 149 -- drivers/gpu/drm/msm/mdp/mdp_kms.h | 148 -- 69 files changed, 14497 insertions(+), 14497 deletions(-) create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h create mode 100644 drivers/gpu/drm/msm/disp/mdp_common.xml.h create mode 100644 drivers/gpu/drm/msm/disp/mdp_format.c create mode 100644 drivers/gpu/drm/msm/disp/mdp_kms.c create mode 100644 drivers/gpu/drm/msm/disp/mdp_kms.h delete mode 100644 drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h delete mode 100644 
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h delete mode 100644 drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h delete mode 100644 drivers/gpu/drm/msm/mdp/mdp_common.xml.h delete mode 100644 drivers/gpu/drm/msm/mdp/mdp_format.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp_kms.c delete mode 100644 drivers/gpu/drm/msm/mdp/mdp_kms.h diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index f74d449476f4..cd40c050b2d7 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -25,26 +25,26 @@ msm-y := \ edp/edp_connector.o \ edp/edp_ctrl.o \ edp/edp_phy.o \ - mdp/mdp_format.o \ - mdp/mdp_kms.o \ - mdp/mdp4/mdp4_crtc.o \ - mdp/mdp4/mdp4_dtv_encoder.o \ - mdp/mdp4/mdp4_lcdc_encoder.o \ - mdp/mdp4/mdp4_lvds_connector.o \ - mdp/mdp4/mdp4_irq.o \ - mdp/mdp4/mdp4_kms.o \ - mdp/mdp4/mdp4_plane.o \ - mdp/mdp5/mdp5_cfg.o \ - mdp/mdp5/mdp5_ctl.o \ - mdp/mdp5/mdp5_crtc.o \ - mdp/mdp5/mdp5_encoder.o \ - mdp/mdp5/mdp5_irq.o \ - mdp/mdp5/mdp5_mdss.o \ - mdp/mdp5/mdp5_kms.o \ - mdp/mdp5/mdp5_pipe.o \ - mdp/mdp5/mdp5_mixer.o \ - mdp/mdp5/mdp5_plane.o \ - mdp/mdp5/mdp5_smp.o \ + disp/mdp_format.o \ + disp/mdp_kms.o \ + disp/mdp4/mdp4_crtc.o \ + disp/mdp4/mdp4_dtv_encoder.o \ + disp/mdp4/mdp4_lcdc_encoder.o \ + disp/mdp4/mdp4_lvds_connector.o \ + disp/mdp4/mdp4_irq.o \ + disp/mdp4/mdp4_kms.o \ + disp/mdp4/mdp4_plane.o \ + disp/mdp5/mdp5_cfg.o \ + disp/mdp5/mdp5_ctl.o \ + disp/mdp5/mdp5_crtc.o \ + disp/mdp5/mdp5_encoder.o \ + disp/mdp5/mdp5_irq.o \ + disp/mdp5/mdp5_mdss.o \ + disp/mdp5/mdp5_kms.o \ + disp/mdp5/mdp5_pipe.o \ + disp/mdp5/mdp5_mixer.o \ + disp/mdp5/mdp5_plane.o \ + disp/mdp5/mdp5_smp.o \ msm_atomic.o \ msm_debugfs.o \ msm_drv.o \ @@ -65,19 +65,19 @@ msm-y := \ msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o -msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o +msm-$(CONFIG_COMMON_CLK) += 
disp/mdp4/mdp4_lvds_pll.o msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ - mdp/mdp4/mdp4_dsi_encoder.o \ + disp/mdp4/mdp4_dsi_encoder.o \ dsi/dsi_cfg.o \ dsi/dsi_host.o \ dsi/dsi_manager.o \ dsi/phy/dsi_phy.o \ - mdp/mdp5/mdp5_cmd_encoder.o + disp/mdp5/mdp5_cmd_encoder.o msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h new file mode 100644 index 000000000000..576cea30d391 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h @@ -0,0 +1,1174 @@ +#ifndef MDP4_XML +#define MDP4_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27) + +Copyright (C) 2013-2017 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum mdp4_pipe { + VG1 = 0, + VG2 = 1, + RGB1 = 2, + RGB2 = 3, + RGB3 = 4, + VG3 = 5, + VG4 = 6, +}; + +enum mdp4_mixer { + MIXER0 = 0, + MIXER1 = 1, + MIXER2 = 2, +}; + +enum mdp4_intf { + INTF_LCDC_DTV = 0, + INTF_DSI_VIDEO = 1, + INTF_DSI_CMD = 2, + INTF_EBI2_TV = 3, +}; + +enum mdp4_cursor_format { + CURSOR_ARGB = 1, + CURSOR_XRGB = 2, +}; + +enum mdp4_frame_format { + FRAME_LINEAR = 0, + FRAME_TILE_ARGB_4X4 = 1, + FRAME_TILE_YCBCR_420 = 2, +}; + +enum mdp4_scale_unit { + SCALE_FIR = 0, + SCALE_MN_PHASE = 1, + SCALE_PIXEL_RPT = 2, +}; + +enum mdp4_dma { + DMA_P = 0, + DMA_S = 1, + DMA_E = 2, +}; + +#define MDP4_IRQ_OVERLAY0_DONE 0x00000001 +#define MDP4_IRQ_OVERLAY1_DONE 0x00000002 +#define MDP4_IRQ_DMA_S_DONE 0x00000004 +#define MDP4_IRQ_DMA_E_DONE 0x00000008 +#define MDP4_IRQ_DMA_P_DONE 0x00000010 +#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020 +#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040 +#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080 +#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100 +#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200 +#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400 +#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800 +#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000 +#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000 +#define MDP4_IRQ_OVERLAY2_DONE 0x40000000 +#define REG_MDP4_VERSION 0x00000000 +#define MDP4_VERSION_MINOR__MASK 0x00ff0000 +#define MDP4_VERSION_MINOR__SHIFT 16 +static inline uint32_t MDP4_VERSION_MINOR(uint32_t val) +{ + return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK; +} +#define MDP4_VERSION_MAJOR__MASK 0xff000000 +#define MDP4_VERSION_MAJOR__SHIFT 24 +static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val) +{ + return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK; +} + +#define REG_MDP4_OVLP0_KICK 0x00000004 + +#define REG_MDP4_OVLP1_KICK 0x00000008 + +#define REG_MDP4_OVLP2_KICK 0x000000d0 + +#define REG_MDP4_DMA_P_KICK 0x0000000c + +#define REG_MDP4_DMA_S_KICK 0x00000010 + +#define REG_MDP4_DMA_E_KICK 0x00000014 + +#define REG_MDP4_DISP_STATUS 0x00000018 + +#define REG_MDP4_DISP_INTF_SEL 0x00000038 +#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003 +#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0 +static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val) +{ + return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK; +} +#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c +#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2 +static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val) +{ + return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK; +} +#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030 +#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4 +static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val) +{ + return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK; +} +#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040 +#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080 + +#define REG_MDP4_RESET_STATUS 0x0000003c + +#define REG_MDP4_READ_CNFG 0x0000004c + +#define REG_MDP4_INTR_ENABLE 0x00000050 + +#define REG_MDP4_INTR_STATUS 0x00000054 + +#define REG_MDP4_INTR_CLEAR 0x00000058 + +#define REG_MDP4_EBI2_LCD0 0x00000060 + +#define REG_MDP4_EBI2_LCD1 0x00000064 + +#define REG_MDP4_PORTMAP_MODE 0x00000070 + +#define REG_MDP4_CS_CONTROLLER0 0x000000c0 + +#define REG_MDP4_CS_CONTROLLER1 0x000000c4 + +#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 +static inline uint32_t 
MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000 + +#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc + +#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 +#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 +#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 +#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 +#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 +#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 +#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 +static inline 
uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 +#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000 + +#define REG_MDP4_VG2_SRC_FORMAT 0x00030050 + +#define REG_MDP4_VG2_CONST_COLOR 0x00031008 + +#define REG_MDP4_OVERLAY_FLUSH 0x00018000 +#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001 +#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002 +#define MDP4_OVERLAY_FLUSH_VG1 0x00000004 +#define MDP4_OVERLAY_FLUSH_VG2 0x00000008 +#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010 +#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020 + +static inline uint32_t __offset_OVLP(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00010000; + case 1: return 0x00018000; + case 2: return 0x00088000; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); } +#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK; +} +#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t 
i0) { return 0x00000014 + __offset_OVLP(i0); } + +static inline uint32_t __offset_STAGE(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000104; + case 1: return 0x00000124; + case 2: return 0x00000144; + case 3: return 0x00000160; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } +#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 +#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 +static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val) +{ + return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; +} +#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004 +#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 +#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 +#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 +static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val) +{ + return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; +} +#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040 +#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080 +#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100 +#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200 + +static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t __offset_STAGE_CO3(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00001004; + case 1: return 0x00001404; + case 2: return 0x00001804; + case 3: return 0x00001b84; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); } +#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001 + +static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); } + + +static inline uint32_t 
REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; } + +#define REG_MDP4_DMA_P_OP_MODE 0x00090070 + +static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; } + +static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; } + +#define REG_MDP4_DMA_S_OP_MODE 0x000a0028 + +static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; } + +static inline uint32_t __offset_DMA(enum mdp4_dma idx) +{ + switch (idx) { + case DMA_P: return 0x00090000; + case DMA_S: return 0x000a0000; + case DMA_E: return 0x000b0000; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } +#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 +#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 +static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; +} +#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c +#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 +static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; +} +#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 +#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 +static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; +} +#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080 +#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00 +#define MDP4_DMA_CONFIG_PACK__SHIFT 8 +static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val) +{ + return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK; +} +#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000 +#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000 + +static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); } +#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val) 
+{ + return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK; +} +#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); } +#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK; +} +#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); } +#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f +#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK; +} +#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000 +#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK; +} + +static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); } +#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff +#define MDP4_DMA_CURSOR_POS_X__SHIFT 0 +static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK; +} +#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000 +#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16 +static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK; +} + +static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); } +#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001 +#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006 +#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1 +static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val) +{ + return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK; +} +#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008 + +static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma 
i0) { return 0x00003000 + __offset_DMA(i0); } + + +static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; } +#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK; +} +#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; } +#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000 +#define MDP4_PIPE_SRC_XY_Y__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK; +} +#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff +#define MDP4_PIPE_SRC_XY_X__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; } +#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK; +} +#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; } +#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000 +#define MDP4_PIPE_DST_XY_Y__SHIFT 16 +static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK; 
+} +#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff +#define MDP4_PIPE_DST_XY_X__SHIFT 0 +static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRCP3_BASE(enum mdp4_pipe i0) { return 0x0002001c + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; } +#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff +#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK; +} +#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000 +#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; } +#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff +#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK; +} +#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000 +#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SSTILE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; } +#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK; +} +#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; } +#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 +#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c +#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 +#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 +#define 
MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100 +#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600 +#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000 +#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000 +#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 +#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 +#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK 0x00180000 +#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT 19 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT) & MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000 +#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x0c000000 +#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 26 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK 0x60000000 +#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT 29 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(enum mdp4_frame_format val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT) & MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; } +#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff +#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK; +} +#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00 +#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK; +} +#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000 +#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK; +} +#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000 +#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; } +#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001 +#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002 +#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK 0x0000000c +#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT 2 +static inline uint32_t MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(enum mdp4_scale_unit val) +{ + return ((val) << MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT) & 
MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK; +} +#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK 0x00000030 +#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT 4 +static inline uint32_t MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(enum mdp4_scale_unit val) +{ + return ((val) << MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK; +} +#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200 +#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400 +#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800 +#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000 +#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000 +#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000 +#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000 +#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000 +#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000 + +static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; } + + +static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } + +#define REG_MDP4_LCDC 0x000c0000 + +#define REG_MDP4_LCDC_ENABLE 0x000c0000 + +#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004 +#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff +#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0 +static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val) +{ + return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK; +} +#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000 +#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16 +static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val) +{ + return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK; +} + +#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008 + +#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c + +#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010 +#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff +#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0 +static inline uint32_t 
MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val) +{ + return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK; +} +#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000 +#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16 +static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val) +{ + return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK; +} + +#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014 + +#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018 + +#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c +#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val) +{ + return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK; +} +#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK; +} +#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 + +#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020 + +#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024 + +#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028 + +#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c +#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff +#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0 +static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val) +{ + return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK; +} +#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 + +#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030 + +#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034 + +#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038 +#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001 +#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002 +#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004 + +#define REG_MDP4_LCDC_LVDS_INTF_CTL 0x000c2000 +#define MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL 0x00000004 +#define MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT 0x00000008 +#define MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP 0x00000010 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_RES_BIT 0x00000020 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_RES_BIT 0x00000040 +#define MDP4_LCDC_LVDS_INTF_CTL_ENABLE 0x00000080 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN 0x00000100 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN 0x00000200 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN 0x00000400 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN 0x00000800 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN 0x00001000 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN 0x00002000 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN 0x00004000 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN 0x00008000 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN 0x00010000 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN 0x00020000 + +static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL(uint32_t i0) { return 0x000c2014 + 0x8*i0; } + +static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(uint32_t i0) { return 0x000c2014 + 0x8*i0; } +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK 0x000000ff +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT 0 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK; +} +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK 0x0000ff00 +#define 
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT 8 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK; +} +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK 0x00ff0000 +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT 16 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK; +} +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK 0xff000000 +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT 24 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK; +} + +static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(uint32_t i0) { return 0x000c2018 + 0x8*i0; } +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK 0x000000ff +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT 0 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK; +} +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK 0x0000ff00 +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT 8 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK; +} +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK 0x00ff0000 +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT 16 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK; +} + +#define REG_MDP4_LCDC_LVDS_PHY_RESET 0x000c2034 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_0 0x000c3000 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_1 0x000c3004 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_2 0x000c3008 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_3 0x000c300c + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_5 0x000c3014 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_6 0x000c3018 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_7 0x000c301c + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_8 0x000c3020 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_9 0x000c3024 + +#define REG_MDP4_LVDS_PHY_PLL_LOCKED 0x000c3080 + +#define REG_MDP4_LVDS_PHY_CFG2 0x000c3108 + +#define REG_MDP4_LVDS_PHY_CFG0 0x000c3100 +#define MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE 0x00000010 +#define MDP4_LVDS_PHY_CFG0_CHANNEL0 0x00000040 +#define MDP4_LVDS_PHY_CFG0_CHANNEL1 0x00000080 + +#define REG_MDP4_DTV 0x000d0000 + +#define REG_MDP4_DTV_ENABLE 0x000d0000 + +#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004 +#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff +#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0 +static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val) +{ + return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK; +} +#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000 +#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16 +static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val) +{ + return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK; +} + +#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008 + +#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c + +#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018 +#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff +#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0 +static inline uint32_t 
MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val) +{ + return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK; +} +#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000 +#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16 +static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val) +{ + return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK; +} + +#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c + +#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020 + +#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c +#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val) +{ + return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK; +} +#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK; +} +#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 + +#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030 + +#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038 + +#define REG_MDP4_DTV_BORDER_CLR 0x000d0040 + +#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044 +#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff +#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0 +static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val) +{ + return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK; +} +#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 + +#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048 + +#define REG_MDP4_DTV_TEST_CNTL 0x000d004c + +#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050 +#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001 +#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002 +#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004 + +#define REG_MDP4_DSI 0x000e0000 + +#define REG_MDP4_DSI_ENABLE 0x000e0000 + +#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004 +#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff +#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0 +static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val) +{ + return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK; +} +#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000 +#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16 +static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val) +{ + return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK; +} + +#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008 + +#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c + +#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010 +#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff +#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0 +static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val) +{ + return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK; +} +#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000 +#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16 +static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val) +{ + return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK; +} + +#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014 + +#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018 + +#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c +#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val) 
+{ + return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK; +} +#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK; +} +#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 + +#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020 + +#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024 + +#define REG_MDP4_DSI_BORDER_CLR 0x000e0028 + +#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c +#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff +#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0 +static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val) +{ + return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK; +} +#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 + +#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030 + +#define REG_MDP4_DSI_TEST_CNTL 0x000e0034 + +#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038 +#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001 +#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002 +#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004 + + +#endif /* MDP4_XML */ diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c new file mode 100644 index 000000000000..6e5e1aa54ce1 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -0,0 +1,670 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include + +#include "mdp4_kms.h" + +struct mdp4_crtc { + struct drm_crtc base; + char name[8]; + int id; + int ovlp; + enum mdp4_dma dma; + bool enabled; + + /* which mixer/encoder we route output to: */ + int mixer; + + struct { + spinlock_t lock; + bool stale; + uint32_t width, height; + uint32_t x, y; + + /* next cursor to scan-out: */ + uint32_t next_iova; + struct drm_gem_object *next_bo; + + /* current cursor being scanned out: */ + struct drm_gem_object *scanout_bo; + } cursor; + + + /* if there is a pending flip, these will be non-null: */ + struct drm_pending_vblank_event *event; + + /* Bits have been flushed at the last commit, + * used to decide if a vsync has happened since last commit. 
+ */ + u32 flushed_mask; + +#define PENDING_CURSOR 0x1 +#define PENDING_FLIP 0x2 + atomic_t pending; + + /* for unref'ing cursor bo's after scanout completes: */ + struct drm_flip_work unref_cursor_work; + + struct mdp_irq vblank; + struct mdp_irq err; +}; +#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base) + +static struct mdp4_kms *get_kms(struct drm_crtc *crtc) +{ + struct msm_drm_private *priv = crtc->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +static void request_pending(struct drm_crtc *crtc, uint32_t pending) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + + atomic_or(pending, &mdp4_crtc->pending); + mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); +} + +static void crtc_flush(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + struct drm_plane *plane; + uint32_t flush = 0; + + drm_atomic_crtc_for_each_plane(plane, crtc) { + enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); + flush |= pipe2flush(pipe_id); + } + + flush |= ovlp2flush(mdp4_crtc->ovlp); + + DBG("%s: flush=%08x", mdp4_crtc->name, flush); + + mdp4_crtc->flushed_mask = flush; + + mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); +} + +/* if file!=NULL, this is preclose potential cancel-flip path */ +static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_pending_vblank_event *event; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + event = mdp4_crtc->event; + if (event) { + mdp4_crtc->event = NULL; + DBG("%s: send event: %p", mdp4_crtc->name, event); + drm_crtc_send_vblank_event(crtc, event); + } + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +static void unref_cursor_worker(struct drm_flip_work *work, void *val) +{ + struct mdp4_crtc *mdp4_crtc = + container_of(work, struct mdp4_crtc, unref_cursor_work); + struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base); + struct msm_kms *kms = &mdp4_kms->base.base; + + msm_gem_put_iova(val, kms->aspace); + drm_gem_object_put_unlocked(val); +} + +static void mdp4_crtc_destroy(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + + drm_crtc_cleanup(crtc); + drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); + + kfree(mdp4_crtc); +} + +/* statically (for now) map planes to mixer stage (z-order): */ +static const int idxs[] = { + [VG1] = 1, + [VG2] = 2, + [RGB1] = 0, + [RGB2] = 0, + [RGB3] = 0, + [VG3] = 3, + [VG4] = 4, + +}; + +/* setup mixer config, for which we need to consider all crtc's and + * the planes attached to them + * + * TODO may possibly need some extra locking here + */ +static void setup_mixer(struct mdp4_kms *mdp4_kms) +{ + struct drm_mode_config *config = &mdp4_kms->dev->mode_config; + struct drm_crtc *crtc; + uint32_t mixer_cfg = 0; + static const enum mdp_mixer_stage_id stages[] = { + STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, + }; + + list_for_each_entry(crtc, &config->crtc_list, head) { + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_plane *plane; + + drm_atomic_crtc_for_each_plane(plane, crtc) { + enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); + int idx = idxs[pipe_id]; + mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer, + pipe_id, stages[idx]); + } + } + + mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg); +} + +static void blend_setup(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct 
mdp4_kms *mdp4_kms = get_kms(crtc); + struct drm_plane *plane; + int i, ovlp = mdp4_crtc->ovlp; + bool alpha[4]= { false, false, false, false }; + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0); + + drm_atomic_crtc_for_each_plane(plane, crtc) { + enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); + int idx = idxs[pipe_id]; + if (idx > 0) { + const struct mdp_format *format = + to_mdp_format(msm_framebuffer_format(plane->fb)); + alpha[idx-1] = format->alpha_enable; + } + } + + for (i = 0; i < 4; i++) { + uint32_t op; + + if (alpha[i]) { + op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) | + MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) | + MDP4_OVLP_STAGE_OP_BG_INV_ALPHA; + } else { + op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) | + MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST); + } + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0); + } + + setup_mixer(mdp4_kms); +} + +static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + enum mdp4_dma dma = mdp4_crtc->dma; + int ovlp = mdp4_crtc->ovlp; + struct drm_display_mode *mode; + + if (WARN_ON(!crtc->state)) + return; + + mode = &crtc->state->adjusted_mode; + + DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mdp4_crtc->name, mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), + MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | + MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); + + /* take data from pipe: */ + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma), + MDP4_DMA_DST_SIZE_WIDTH(0) | + MDP4_DMA_DST_SIZE_HEIGHT(0)); + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp), + MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) | + MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay)); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0); + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); + + if (dma == DMA_E) { + mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); + mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); + mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); + } +} + +static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + + DBG("%s", mdp4_crtc->name); + + if (WARN_ON(!mdp4_crtc->enabled)) + return; + + /* Disable/save vblank irq handling before power is disabled */ + drm_crtc_vblank_off(crtc); + + mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); + 
mdp4_disable(mdp4_kms); + + mdp4_crtc->enabled = false; +} + +static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + + DBG("%s", mdp4_crtc->name); + + if (WARN_ON(mdp4_crtc->enabled)) + return; + + mdp4_enable(mdp4_kms); + + /* Restore vblank irq handling after power is enabled */ + drm_crtc_vblank_on(crtc); + + mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err); + + crtc_flush(crtc); + + mdp4_crtc->enabled = true; +} + +static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + DBG("%s: check", mdp4_crtc->name); + // TODO anything else to check? + return 0; +} + +static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + DBG("%s: begin", mdp4_crtc->name); +} + +static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_device *dev = crtc->dev; + unsigned long flags; + + DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event); + + WARN_ON(mdp4_crtc->event); + + spin_lock_irqsave(&dev->event_lock, flags); + mdp4_crtc->event = crtc->state->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + + blend_setup(crtc); + crtc_flush(crtc); + request_pending(crtc, PENDING_FLIP); +} + +#define CURSOR_WIDTH 64 +#define CURSOR_HEIGHT 64 + +/* called from IRQ to update cursor related registers (if needed). The + * cursor registers, other than x/y position, appear not to be double + * buffered, and changing them other than from vblank seems to trigger + * underflow. 
+ */ +static void update_cursor(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + struct msm_kms *kms = &mdp4_kms->base.base; + enum mdp4_dma dma = mdp4_crtc->dma; + unsigned long flags; + + spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); + if (mdp4_crtc->cursor.stale) { + struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; + struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; + uint64_t iova = mdp4_crtc->cursor.next_iova; + + if (next_bo) { + /* take a obj ref + iova ref when we start scanning out: */ + drm_gem_object_get(next_bo); + msm_gem_get_iova(next_bo, kms->aspace, &iova); + + /* enable cursor: */ + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma), + MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) | + MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height)); + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova); + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma), + MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) | + MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN); + } else { + /* disable cursor: */ + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), + mdp4_kms->blank_cursor_iova); + } + + /* and drop the iova ref + obj rev when done scanning out: */ + if (prev_bo) + drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo); + + mdp4_crtc->cursor.scanout_bo = next_bo; + mdp4_crtc->cursor.stale = false; + } + + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), + MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) | + MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y)); + + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); +} + +static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, uint32_t handle, + uint32_t width, uint32_t height) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + struct msm_kms *kms = &mdp4_kms->base.base; + struct drm_device *dev = crtc->dev; + struct drm_gem_object *cursor_bo, *old_bo; + unsigned long flags; + uint64_t iova; + int ret; + + if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { + dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height); + return -EINVAL; + } + + if (handle) { + cursor_bo = drm_gem_object_lookup(file_priv, handle); + if (!cursor_bo) + return -ENOENT; + } else { + cursor_bo = NULL; + } + + if (cursor_bo) { + ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova); + if (ret) + goto fail; + } else { + iova = 0; + } + + spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); + old_bo = mdp4_crtc->cursor.next_bo; + mdp4_crtc->cursor.next_bo = cursor_bo; + mdp4_crtc->cursor.next_iova = iova; + mdp4_crtc->cursor.width = width; + mdp4_crtc->cursor.height = height; + mdp4_crtc->cursor.stale = true; + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); + + if (old_bo) { + /* drop our previous reference: */ + drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo); + } + + request_pending(crtc, PENDING_CURSOR); + + return 0; + +fail: + drm_gem_object_put_unlocked(cursor_bo); + return ret; +} + +static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + unsigned long flags; + + spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); + mdp4_crtc->cursor.x = x; + mdp4_crtc->cursor.y = y; + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); + + crtc_flush(crtc); + request_pending(crtc, PENDING_CURSOR); + + return 0; +} + +static const struct drm_crtc_funcs mdp4_crtc_funcs = { + .set_config 
= drm_atomic_helper_set_config, + .destroy = mdp4_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .cursor_set = mdp4_crtc_cursor_set, + .cursor_move = mdp4_crtc_cursor_move, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { + .mode_set_nofb = mdp4_crtc_mode_set_nofb, + .atomic_check = mdp4_crtc_atomic_check, + .atomic_begin = mdp4_crtc_atomic_begin, + .atomic_flush = mdp4_crtc_atomic_flush, + .atomic_enable = mdp4_crtc_atomic_enable, + .atomic_disable = mdp4_crtc_atomic_disable, +}; + +static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); + struct drm_crtc *crtc = &mdp4_crtc->base; + struct msm_drm_private *priv = crtc->dev->dev_private; + unsigned pending; + + mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank); + + pending = atomic_xchg(&mdp4_crtc->pending, 0); + + if (pending & PENDING_FLIP) { + complete_flip(crtc, NULL); + } + + if (pending & PENDING_CURSOR) { + update_cursor(crtc); + drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq); + } +} + +static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err); + struct drm_crtc *crtc = &mdp4_crtc->base; + DBG("%s: error: %08x", mdp4_crtc->name, irqstatus); + crtc_flush(crtc); +} + +static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + int ret; + + ret = drm_crtc_vblank_get(crtc); + if (ret) + return; + + ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, + !(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) & + mdp4_crtc->flushed_mask), + msecs_to_jiffies(50)); + if (ret <= 0) + dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id); + + mdp4_crtc->flushed_mask = 0; + + drm_crtc_vblank_put(crtc); +} + +uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + return mdp4_crtc->vblank.irqmask; +} + +/* set dma config, ie. the format the encoder wants. 
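In practice this is a bitfield of MDP4_DMA_CONFIG_* flags (component packing order and bits per component) that the encoder enable hooks build and that is written to the DMA engine's CONFIG register below.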
*/ +void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + + mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config); +} + +/* set interface for routing crtc->encoder: */ +void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + uint32_t intf_sel; + + intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL); + + switch (mdp4_crtc->dma) { + case DMA_P: + intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK; + intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf); + break; + case DMA_S: + intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK; + intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf); + break; + case DMA_E: + intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK; + intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf); + break; + } + + if (intf == INTF_DSI_VIDEO) { + intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD; + intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO; + } else if (intf == INTF_DSI_CMD) { + intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO; + intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD; + } + + mdp4_crtc->mixer = mixer; + + blend_setup(crtc); + + DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel); + + mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel); +} + +void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc) +{ + /* wait_for_flush_done is the only case for now. + * Later we will have command mode CRTC to wait for + * other event. + */ + mdp4_crtc_wait_for_flush_done(crtc); +} + +static const char *dma_names[] = { + "DMA_P", "DMA_S", "DMA_E", +}; + +/* initialize crtc */ +struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, + struct drm_plane *plane, int id, int ovlp_id, + enum mdp4_dma dma_id) +{ + struct drm_crtc *crtc = NULL; + struct mdp4_crtc *mdp4_crtc; + + mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL); + if (!mdp4_crtc) + return ERR_PTR(-ENOMEM); + + crtc = &mdp4_crtc->base; + + mdp4_crtc->id = id; + + mdp4_crtc->ovlp = ovlp_id; + mdp4_crtc->dma = dma_id; + + mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma); + mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq; + + mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma); + mdp4_crtc->err.irq = mdp4_crtc_err_irq; + + snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d", + dma_names[dma_id], ovlp_id); + + spin_lock_init(&mdp4_crtc->cursor.lock); + + drm_flip_work_init(&mdp4_crtc->unref_cursor_work, + "unref cursor", unref_cursor_worker); + + drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs, + NULL); + drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); + plane->crtc = crtc; + + return crtc; +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c new file mode 100644 index 000000000000..6a1ebdace391 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2014, Inforce Computing. All rights reserved. + * + * Author: Vinay Simha + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include + +#include "mdp4_kms.h" + +struct mdp4_dsi_encoder { + struct drm_encoder base; + struct drm_panel *panel; + bool enabled; +}; +#define to_mdp4_dsi_encoder(x) container_of(x, struct mdp4_dsi_encoder, base) + +static struct mdp4_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder) +{ + struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); + + drm_encoder_cleanup(encoder); + kfree(mdp4_dsi_encoder); +} + +static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = { + .destroy = mdp4_dsi_encoder_destroy, +}; + +static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp4_kms *mdp4_kms = get_kms(encoder); + uint32_t dsi_hsync_skew, vsync_period, vsync_len, ctrl_pol; + uint32_t display_v_start, display_v_end; + uint32_t hsync_start_x, hsync_end_x; + + mode = adjusted_mode; + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + ctrl_pol = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl_pol |= MDP4_DSI_CTRL_POLARITY_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl_pol |= MDP4_DSI_CTRL_POLARITY_VSYNC_LOW; + /* probably need to get DATA_EN polarity from panel.. */ + + dsi_hsync_skew = 0; /* get this from panel? 
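The conversions below translate DRM mode timings, which are measured from the start of active video, into sync-relative values: horizontal positions in pixels from the start of the hsync pulse, vertical positions in pixel clocks (lines * htotal) from the start of the vsync pulse. E.g. with hdisplay=480, hsync_start=512, htotal=560: hsync_start_x = 560 - 512 = 48 and hsync_end_x = 560 - (512 - 480) - 1 = 527, i.e. exactly 480 active pixels.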
*/ + + hsync_start_x = (mode->htotal - mode->hsync_start); + hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; + + vsync_period = mode->vtotal * mode->htotal; + vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; + display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dsi_hsync_skew; + display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dsi_hsync_skew - 1; + + mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_CTRL, + MDP4_DSI_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) | + MDP4_DSI_HSYNC_CTRL_PERIOD(mode->htotal)); + mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_PERIOD, vsync_period); + mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_LEN, vsync_len); + mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_HCTRL, + MDP4_DSI_DISPLAY_HCTRL_START(hsync_start_x) | + MDP4_DSI_DISPLAY_HCTRL_END(hsync_end_x)); + mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VSTART, display_v_start); + mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VEND, display_v_end); + + mdp4_write(mdp4_kms, REG_MDP4_DSI_CTRL_POLARITY, ctrl_pol); + mdp4_write(mdp4_kms, REG_MDP4_DSI_UNDERFLOW_CLR, + MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY | + MDP4_DSI_UNDERFLOW_CLR_COLOR(0xff)); + mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_HCTL, + MDP4_DSI_ACTIVE_HCTL_START(0) | + MDP4_DSI_ACTIVE_HCTL_END(0)); + mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_SKEW, dsi_hsync_skew); + mdp4_write(mdp4_kms, REG_MDP4_DSI_BORDER_CLR, 0); + mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VSTART, 0); + mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VEND, 0); +} + +static void mdp4_dsi_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + + if (!mdp4_dsi_encoder->enabled) + return; + + mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
+ */ + mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC); + + mdp4_dsi_encoder->enabled = false; +} + +static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder) +{ + struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + + if (mdp4_dsi_encoder->enabled) + return; + + mdp4_crtc_set_config(encoder->crtc, + MDP4_DMA_CONFIG_PACK_ALIGN_MSB | + MDP4_DMA_CONFIG_DEFLKR_EN | + MDP4_DMA_CONFIG_DITHER_EN | + MDP4_DMA_CONFIG_R_BPC(BPC8) | + MDP4_DMA_CONFIG_G_BPC(BPC8) | + MDP4_DMA_CONFIG_B_BPC(BPC8) | + MDP4_DMA_CONFIG_PACK(0x21)); + + mdp4_crtc_set_intf(encoder->crtc, INTF_DSI_VIDEO, 0); + + mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 1); + + mdp4_dsi_encoder->enabled = true; +} + +static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = { + .mode_set = mdp4_dsi_encoder_mode_set, + .disable = mdp4_dsi_encoder_disable, + .enable = mdp4_dsi_encoder_enable, +}; + +/* initialize encoder */ +struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev) +{ + struct drm_encoder *encoder = NULL; + struct mdp4_dsi_encoder *mdp4_dsi_encoder; + int ret; + + mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL); + if (!mdp4_dsi_encoder) { + ret = -ENOMEM; + goto fail; + } + + encoder = &mdp4_dsi_encoder->base; + + drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs, + DRM_MODE_ENCODER_DSI, NULL); + drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs); + + return encoder; + +fail: + if (encoder) + mdp4_dsi_encoder_destroy(encoder); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c new file mode 100644 index 000000000000..ba8e587f734b --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c @@ -0,0 +1,282 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include + +#include "mdp4_kms.h" + +struct mdp4_dtv_encoder { + struct drm_encoder base; + struct clk *hdmi_clk; + struct clk *mdp_clk; + unsigned long int pixclock; + bool enabled; + uint32_t bsc; +}; +#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base) + +static struct mdp4_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING +#include +/* not ironically named at all.. no, really.. 
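('bs' is bus scaling: these helpers talk to the downstream msm_bus_scale API and are only built when DOWNSTREAM_CONFIG_MSM_BUS_SCALING is defined; otherwise the empty stubs below are used.)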
*/ +static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) +{ + struct drm_device *dev = mdp4_dtv_encoder->base.dev; + struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0"); + + if (!dtv_pdata) { + dev_err(dev->dev, "could not find dtv pdata\n"); + return; + } + + if (dtv_pdata->bus_scale_table) { + mdp4_dtv_encoder->bsc = msm_bus_scale_register_client( + dtv_pdata->bus_scale_table); + DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc); + DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save); + if (dtv_pdata->lcdc_power_save) + dtv_pdata->lcdc_power_save(1); + } +} + +static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) +{ + if (mdp4_dtv_encoder->bsc) { + msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc); + mdp4_dtv_encoder->bsc = 0; + } +} + +static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) +{ + if (mdp4_dtv_encoder->bsc) { + DBG("set bus scaling: %d", idx); + msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx); + } +} +#else +static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {} +static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {} +static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {} +#endif + +static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + bs_fini(mdp4_dtv_encoder); + drm_encoder_cleanup(encoder); + kfree(mdp4_dtv_encoder); +} + +static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = { + .destroy = mdp4_dtv_encoder_destroy, +}; + +static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; + uint32_t display_v_start, display_v_end; + uint32_t hsync_start_x, hsync_end_x; + + mode = adjusted_mode; + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + mdp4_dtv_encoder->pixclock = mode->clock * 1000; + + DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock); + + ctrl_pol = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW; + /* probably need to get DATA_EN polarity from panel.. */ + + dtv_hsync_skew = 0; /* get this from panel? 
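(The timing math below is the same active-video to sync-pulse-relative conversion done for the DSI encoder above.)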
*/ + + hsync_start_x = (mode->htotal - mode->hsync_start); + hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; + + vsync_period = mode->vtotal * mode->htotal; + vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; + display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; + display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; + + mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL, + MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) | + MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period); + mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len); + mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL, + MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) | + MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start); + mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end); + mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0); + mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR, + MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY | + MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew); + mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL, + MDP4_DTV_ACTIVE_HCTL_START(0) | + MDP4_DTV_ACTIVE_HCTL_END(0)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0); +} + +static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + + if (WARN_ON(!mdp4_dtv_encoder->enabled)) + return; + + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
+ */ + mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); + + clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); + clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); + + bs_set(mdp4_dtv_encoder, 0); + + mdp4_dtv_encoder->enabled = false; +} + +static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + unsigned long pc = mdp4_dtv_encoder->pixclock; + int ret; + + if (WARN_ON(mdp4_dtv_encoder->enabled)) + return; + + mdp4_crtc_set_config(encoder->crtc, + MDP4_DMA_CONFIG_R_BPC(BPC8) | + MDP4_DMA_CONFIG_G_BPC(BPC8) | + MDP4_DMA_CONFIG_B_BPC(BPC8) | + MDP4_DMA_CONFIG_PACK(0x21)); + mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1); + + bs_set(mdp4_dtv_encoder, 1); + + DBG("setting mdp_clk=%lu", pc); + + ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc); + if (ret) + dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n", + pc, ret); + + ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); + if (ret) + dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret); + + ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); + if (ret) + dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret); + + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); + + mdp4_dtv_encoder->enabled = true; +} + +static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = { + .mode_set = mdp4_dtv_encoder_mode_set, + .enable = mdp4_dtv_encoder_enable, + .disable = mdp4_dtv_encoder_disable, +}; + +long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate); +} + +/* initialize encoder */ +struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) +{ + struct drm_encoder *encoder = NULL; + struct mdp4_dtv_encoder *mdp4_dtv_encoder; + int ret; + + mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL); + if (!mdp4_dtv_encoder) { + ret = -ENOMEM; + goto fail; + } + + encoder = &mdp4_dtv_encoder->base; + + drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); + + mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk"); + if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) { + dev_err(dev->dev, "failed to get hdmi_clk\n"); + ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk); + goto fail; + } + + mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk"); + if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) { + dev_err(dev->dev, "failed to get tv_clk\n"); + ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk); + goto fail; + } + + bs_init(mdp4_dtv_encoder); + + return encoder; + +fail: + if (encoder) + mdp4_dtv_encoder_destroy(encoder); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c new file mode 100644 index 000000000000..b764d7f10312 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include + +#include "msm_drv.h" +#include "mdp4_kms.h" + +void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask) +{ + mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR, + irqmask ^ (irqmask & old_irqmask)); + mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask); +} + +static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler); + static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1); + extern bool dumpstate; + + DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus); + + if (dumpstate && __ratelimit(&rs)) { + struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev); + drm_state_dump(mdp4_kms->dev, &p); + } +} + +void mdp4_irq_preinstall(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + mdp4_enable(mdp4_kms); + mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); + mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); + mdp4_disable(mdp4_kms); +} + +int mdp4_irq_postinstall(struct msm_kms *kms) +{ + struct mdp_kms *mdp_kms = to_mdp_kms(kms); + struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms); + struct mdp_irq *error_handler = &mdp4_kms->error_handler; + + error_handler->irq = mdp4_irq_error_handler; + error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN | + MDP4_IRQ_EXTERNAL_INTF_UDERRUN; + + mdp_irq_register(mdp_kms, error_handler); + + return 0; +} + +void mdp4_irq_uninstall(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + mdp4_enable(mdp4_kms); + mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); + mdp4_disable(mdp4_kms); +} + +irqreturn_t mdp4_irq(struct msm_kms *kms) +{ + struct mdp_kms *mdp_kms = to_mdp_kms(kms); + struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms); + struct drm_device *dev = mdp4_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + unsigned int id; + uint32_t status, enable; + + enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE); + status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable; + mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status); + + VERB("status=%08x", status); + + mdp_dispatch_irqs(mdp_kms, status); + + for (id = 0; id < priv->num_crtcs; id++) + if (status & mdp4_crtc_vblank(priv->crtcs[id])) + drm_handle_vblank(dev, id); + + return IRQ_HANDLED; +} + +int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + + mdp4_enable(mdp4_kms); + mdp_update_vblank_mask(to_mdp_kms(kms), + mdp4_crtc_vblank(crtc), true); + mdp4_disable(mdp4_kms); + + return 0; +} + +void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + + mdp4_enable(mdp4_kms); + mdp_update_vblank_mask(to_mdp_kms(kms), + mdp4_crtc_vblank(crtc), false); + mdp4_disable(mdp4_kms); +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c new file mode 100644 index 000000000000..4b646bf9c214 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -0,0 +1,572 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: 
Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + + +#include "msm_drv.h" +#include "msm_gem.h" +#include "msm_mmu.h" +#include "mdp4_kms.h" + +static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); + +static int mdp4_hw_init(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + struct drm_device *dev = mdp4_kms->dev; + uint32_t version, major, minor, dmap_cfg, vg_cfg; + unsigned long clk; + int ret = 0; + + pm_runtime_get_sync(dev->dev); + + mdp4_enable(mdp4_kms); + version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); + mdp4_disable(mdp4_kms); + + major = FIELD(version, MDP4_VERSION_MAJOR); + minor = FIELD(version, MDP4_VERSION_MINOR); + + DBG("found MDP4 version v%d.%d", major, minor); + + if (major != 4) { + dev_err(dev->dev, "unexpected MDP version: v%d.%d\n", + major, minor); + ret = -ENXIO; + goto out; + } + + mdp4_kms->rev = minor; + + if (mdp4_kms->rev > 1) { + mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff); + mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f); + } + + mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3); + + /* max read pending cmd config, 3 pending requests: */ + mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222); + + clk = clk_get_rate(mdp4_kms->clk); + + if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) { + dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */ + vg_cfg = 0x47; /* 16 bytes-burs x 8 req */ + } else { + dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */ + vg_cfg = 0x43; /* 16 bytes-burst x 4 req */ + } + + DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg); + + mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg); + mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg); + + if (mdp4_kms->rev >= 2) + mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1); + mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0); + + /* disable CSC matrix / YUV by default: */ + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0); + + if (mdp4_kms->rev > 1) + mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1); + + dev->mode_config.allow_fb_modifiers = true; + +out: + pm_runtime_put_sync(dev->dev); + + return ret; +} + +static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + + mdp4_enable(mdp4_kms); + + /* see 119ecb7fd */ + for_each_new_crtc_in_state(state, crtc, crtc_state, i) 
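/* hold a vblank reference for each crtc touched by this commit so the vblank irq stays enabled until complete_commit() drops it */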
+ drm_crtc_vblank_get(crtc); +} + +static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + + /* see 119ecb7fd */ + for_each_new_crtc_in_state(state, crtc, crtc_state, i) + drm_crtc_vblank_put(crtc); + + mdp4_disable(mdp4_kms); +} + +static void mdp4_wait_for_crtc_commit_done(struct msm_kms *kms, + struct drm_crtc *crtc) +{ + mdp4_crtc_wait_for_commit_done(crtc); +} + +static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, + struct drm_encoder *encoder) +{ + /* if we had >1 encoder, we'd need something more clever: */ + switch (encoder->encoder_type) { + case DRM_MODE_ENCODER_TMDS: + return mdp4_dtv_round_pixclk(encoder, rate); + case DRM_MODE_ENCODER_LVDS: + case DRM_MODE_ENCODER_DSI: + default: + return rate; + } +} + +static const char * const iommu_ports[] = { + "mdp_port0_cb0", "mdp_port1_cb0", +}; + +static void mdp4_destroy(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + struct device *dev = mdp4_kms->dev->dev; + struct msm_gem_address_space *aspace = kms->aspace; + + if (mdp4_kms->blank_cursor_iova) + msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace); + drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo); + + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_put(aspace); + } + + if (mdp4_kms->rpm_enabled) + pm_runtime_disable(dev); + + kfree(mdp4_kms); +} + +static const struct mdp_kms_funcs kms_funcs = { + .base = { + .hw_init = mdp4_hw_init, + .irq_preinstall = mdp4_irq_preinstall, + .irq_postinstall = mdp4_irq_postinstall, + .irq_uninstall = mdp4_irq_uninstall, + .irq = mdp4_irq, + .enable_vblank = mdp4_enable_vblank, + .disable_vblank = mdp4_disable_vblank, + .prepare_commit = mdp4_prepare_commit, + .complete_commit = mdp4_complete_commit, + .wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done, + .get_format = mdp_get_format, + .round_pixclk = mdp4_round_pixclk, + .destroy = mdp4_destroy, + }, + .set_irqmask = mdp4_set_irqmask, +}; + +int mdp4_disable(struct mdp4_kms *mdp4_kms) +{ + DBG(""); + + clk_disable_unprepare(mdp4_kms->clk); + if (mdp4_kms->pclk) + clk_disable_unprepare(mdp4_kms->pclk); + clk_disable_unprepare(mdp4_kms->lut_clk); + if (mdp4_kms->axi_clk) + clk_disable_unprepare(mdp4_kms->axi_clk); + + return 0; +} + +int mdp4_enable(struct mdp4_kms *mdp4_kms) +{ + DBG(""); + + clk_prepare_enable(mdp4_kms->clk); + if (mdp4_kms->pclk) + clk_prepare_enable(mdp4_kms->pclk); + clk_prepare_enable(mdp4_kms->lut_clk); + if (mdp4_kms->axi_clk) + clk_prepare_enable(mdp4_kms->axi_clk); + + return 0; +} + + +static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, + int intf_type) +{ + struct drm_device *dev = mdp4_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + struct drm_encoder *encoder; + struct drm_connector *connector; + struct device_node *panel_node; + int dsi_id; + int ret; + + switch (intf_type) { + case DRM_MODE_ENCODER_LVDS: + /* + * bail out early if there is no panel node (no need to + * initialize LCDC encoder and LVDS connector) + */ + panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0); + if (!panel_node) + return 0; + + encoder = mdp4_lcdc_encoder_init(dev, panel_node); + if (IS_ERR(encoder)) { + dev_err(dev->dev, "failed to construct LCDC encoder\n"); + return PTR_ERR(encoder); + } + + /* LCDC can be hooked to DMA_P (TODO: Add DMA_S 
later?) */ + encoder->possible_crtcs = 1 << DMA_P; + + connector = mdp4_lvds_connector_init(dev, panel_node, encoder); + if (IS_ERR(connector)) { + dev_err(dev->dev, "failed to initialize LVDS connector\n"); + return PTR_ERR(connector); + } + + priv->encoders[priv->num_encoders++] = encoder; + priv->connectors[priv->num_connectors++] = connector; + + break; + case DRM_MODE_ENCODER_TMDS: + encoder = mdp4_dtv_encoder_init(dev); + if (IS_ERR(encoder)) { + dev_err(dev->dev, "failed to construct DTV encoder\n"); + return PTR_ERR(encoder); + } + + /* DTV can be hooked to DMA_E: */ + encoder->possible_crtcs = 1 << 1; + + if (priv->hdmi) { + /* Construct bridge/connector for HDMI: */ + ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); + if (ret) { + dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); + return ret; + } + } + + priv->encoders[priv->num_encoders++] = encoder; + + break; + case DRM_MODE_ENCODER_DSI: + /* only DSI1 supported for now */ + dsi_id = 0; + + if (!priv->dsi[dsi_id]) + break; + + encoder = mdp4_dsi_encoder_init(dev); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + dev_err(dev->dev, + "failed to construct DSI encoder: %d\n", ret); + return ret; + } + + /* TODO: Add DMA_S later? */ + encoder->possible_crtcs = 1 << DMA_P; + priv->encoders[priv->num_encoders++] = encoder; + + ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); + if (ret) { + dev_err(dev->dev, "failed to initialize DSI: %d\n", + ret); + return ret; + } + + break; + default: + dev_err(dev->dev, "Invalid or unsupported interface\n"); + return -EINVAL; + } + + return 0; +} + +static int modeset_init(struct mdp4_kms *mdp4_kms) +{ + struct drm_device *dev = mdp4_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + struct drm_plane *plane; + struct drm_crtc *crtc; + int i, ret; + static const enum mdp4_pipe rgb_planes[] = { + RGB1, RGB2, + }; + static const enum mdp4_pipe vg_planes[] = { + VG1, VG2, + }; + static const enum mdp4_dma mdp4_crtcs[] = { + DMA_P, DMA_E, + }; + static const char * const mdp4_crtc_names[] = { + "DMA_P", "DMA_E", + }; + static const int mdp4_intfs[] = { + DRM_MODE_ENCODER_LVDS, + DRM_MODE_ENCODER_DSI, + DRM_MODE_ENCODER_TMDS, + }; + + /* construct non-private planes: */ + for (i = 0; i < ARRAY_SIZE(vg_planes); i++) { + plane = mdp4_plane_init(dev, vg_planes[i], false); + if (IS_ERR(plane)) { + dev_err(dev->dev, + "failed to construct plane for VG%d\n", i + 1); + ret = PTR_ERR(plane); + goto fail; + } + priv->planes[priv->num_planes++] = plane; + } + + for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) { + plane = mdp4_plane_init(dev, rgb_planes[i], true); + if (IS_ERR(plane)) { + dev_err(dev->dev, + "failed to construct plane for RGB%d\n", i + 1); + ret = PTR_ERR(plane); + goto fail; + } + + crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i, + mdp4_crtcs[i]); + if (IS_ERR(crtc)) { + dev_err(dev->dev, "failed to construct crtc for %s\n", + mdp4_crtc_names[i]); + ret = PTR_ERR(crtc); + goto fail; + } + + priv->crtcs[priv->num_crtcs++] = crtc; + } + + /* + * we currently set up two relatively fixed paths: + * + * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS + * or + * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel + * + * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI + */ + + for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) { + ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]); + if (ret) { + dev_err(dev->dev, "failed to initialize intf: %d, %d\n", + i, ret); + goto fail; + } + } + + return 0; + +fail: + return ret; +} + +struct msm_kms *mdp4_kms_init(struct 
drm_device *dev) +{ + struct platform_device *pdev = to_platform_device(dev->dev); + struct mdp4_platform_config *config = mdp4_get_config(pdev); + struct mdp4_kms *mdp4_kms; + struct msm_kms *kms = NULL; + struct msm_gem_address_space *aspace; + int irq, ret; + + mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); + if (!mdp4_kms) { + dev_err(dev->dev, "failed to allocate kms\n"); + ret = -ENOMEM; + goto fail; + } + + mdp_kms_init(&mdp4_kms->base, &kms_funcs); + + kms = &mdp4_kms->base.base; + + mdp4_kms->dev = dev; + + mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4"); + if (IS_ERR(mdp4_kms->mmio)) { + ret = PTR_ERR(mdp4_kms->mmio); + goto fail; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + dev_err(dev->dev, "failed to get irq: %d\n", ret); + goto fail; + } + + kms->irq = irq; + + /* NOTE: driver for this regulator still missing upstream.. use + * _get_exclusive() and ignore the error if it does not exist + * (and hope that the bootloader left it on for us) + */ + mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd"); + if (IS_ERR(mdp4_kms->vdd)) + mdp4_kms->vdd = NULL; + + if (mdp4_kms->vdd) { + ret = regulator_enable(mdp4_kms->vdd); + if (ret) { + dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret); + goto fail; + } + } + + mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk"); + if (IS_ERR(mdp4_kms->clk)) { + dev_err(dev->dev, "failed to get core_clk\n"); + ret = PTR_ERR(mdp4_kms->clk); + goto fail; + } + + mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk"); + if (IS_ERR(mdp4_kms->pclk)) + mdp4_kms->pclk = NULL; + + // XXX if (rev >= MDP_REV_42) { ??? + mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk"); + if (IS_ERR(mdp4_kms->lut_clk)) { + dev_err(dev->dev, "failed to get lut_clk\n"); + ret = PTR_ERR(mdp4_kms->lut_clk); + goto fail; + } + + mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk"); + if (IS_ERR(mdp4_kms->axi_clk)) { + dev_err(dev->dev, "failed to get axi_clk\n"); + ret = PTR_ERR(mdp4_kms->axi_clk); + goto fail; + } + + clk_set_rate(mdp4_kms->clk, config->max_clk); + clk_set_rate(mdp4_kms->lut_clk, config->max_clk); + + pm_runtime_enable(dev->dev); + mdp4_kms->rpm_enabled = true; + + /* make sure things are off before attaching iommu (bootloader could + * have left things on, in which case we'll start getting faults if + * we don't disable): + */ + mdp4_enable(mdp4_kms); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); + mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); + mdp4_disable(mdp4_kms); + mdelay(16); + + if (config->iommu) { + aspace = msm_gem_address_space_create(&pdev->dev, + config->iommu, "mdp4"); + if (IS_ERR(aspace)) { + ret = PTR_ERR(aspace); + goto fail; + } + + kms->aspace = aspace; + + ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, + ARRAY_SIZE(iommu_ports)); + if (ret) + goto fail; + } else { + dev_info(dev->dev, "no iommu, fallback to phys " + "contig buffers for scanout\n"); + aspace = NULL; + } + + ret = modeset_init(mdp4_kms); + if (ret) { + dev_err(dev->dev, "modeset_init failed: %d\n", ret); + goto fail; + } + + mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC); + if (IS_ERR(mdp4_kms->blank_cursor_bo)) { + ret = PTR_ERR(mdp4_kms->blank_cursor_bo); + dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); + mdp4_kms->blank_cursor_bo = NULL; + goto fail; + } + + ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace, + &mdp4_kms->blank_cursor_iova); + if (ret) { + dev_err(dev->dev, "could not pin 
blank-cursor bo: %d\n", ret); + goto fail; + } + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + + return kms; + +fail: + if (kms) + mdp4_destroy(kms); + return ERR_PTR(ret); +} + +static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev) +{ + static struct mdp4_platform_config config = {}; + + /* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */ + config.max_clk = 266667000; + config.iommu = iommu_domain_alloc(&platform_bus_type); + if (config.iommu) { + config.iommu->geometry.aperture_start = 0x1000; + config.iommu->geometry.aperture_end = 0xffffffff; + } + + return &config; +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h new file mode 100644 index 000000000000..0c13f8697bfe --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h @@ -0,0 +1,249 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __MDP4_KMS_H__ +#define __MDP4_KMS_H__ + +#include + +#include "msm_drv.h" +#include "msm_kms.h" +#include "disp/mdp_kms.h" +#include "mdp4.xml.h" + +struct device_node; + +struct mdp4_kms { + struct mdp_kms base; + + struct drm_device *dev; + + int rev; + + void __iomem *mmio; + + struct regulator *vdd; + + struct clk *clk; + struct clk *pclk; + struct clk *lut_clk; + struct clk *axi_clk; + + struct mdp_irq error_handler; + + bool rpm_enabled; + + /* empty/blank cursor bo to use when cursor is "disabled" */ + struct drm_gem_object *blank_cursor_bo; + uint64_t blank_cursor_iova; +}; +#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) + +/* platform config data (ie. from DT, or pdata) */ +struct mdp4_platform_config { + struct iommu_domain *iommu; + uint32_t max_clk; +}; + +static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data) +{ + msm_writel(data, mdp4_kms->mmio + reg); +} + +static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg) +{ + return msm_readl(mdp4_kms->mmio + reg); +} + +static inline uint32_t pipe2flush(enum mdp4_pipe pipe) +{ + switch (pipe) { + case VG1: return MDP4_OVERLAY_FLUSH_VG1; + case VG2: return MDP4_OVERLAY_FLUSH_VG2; + case RGB1: return MDP4_OVERLAY_FLUSH_RGB1; + case RGB2: return MDP4_OVERLAY_FLUSH_RGB2; + default: return 0; + } +} + +static inline uint32_t ovlp2flush(int ovlp) +{ + switch (ovlp) { + case 0: return MDP4_OVERLAY_FLUSH_OVLP0; + case 1: return MDP4_OVERLAY_FLUSH_OVLP1; + default: return 0; + } +} + +static inline uint32_t dma2irq(enum mdp4_dma dma) +{ + switch (dma) { + case DMA_P: return MDP4_IRQ_DMA_P_DONE; + case DMA_S: return MDP4_IRQ_DMA_S_DONE; + case DMA_E: return MDP4_IRQ_DMA_E_DONE; + default: return 0; + } +} + +static inline uint32_t dma2err(enum mdp4_dma dma) +{ + switch (dma) { + case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN; + case DMA_S: return 0; // ??? 
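(no underrun irq is wired up for the secondary interface in this driver)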
+ case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN; + default: return 0; + } +} + +static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer, + enum mdp4_pipe pipe, enum mdp_mixer_stage_id stage) +{ + switch (pipe) { + case VG1: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1); + break; + case VG2: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1); + break; + case RGB1: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1); + break; + case RGB2: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1); + break; + case RGB3: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1); + break; + case VG3: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1); + break; + case VG4: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1); + break; + default: + WARN(1, "invalid pipe"); + break; + } + + return mixer_cfg; +} + +int mdp4_disable(struct mdp4_kms *mdp4_kms); +int mdp4_enable(struct mdp4_kms *mdp4_kms); + +void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); +void mdp4_irq_preinstall(struct msm_kms *kms); +int mdp4_irq_postinstall(struct msm_kms *kms); +void mdp4_irq_uninstall(struct msm_kms *kms); +irqreturn_t mdp4_irq(struct msm_kms *kms); +int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); + +static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe) +{ + switch (pipe) { + case VG1: + case VG2: + case VG3: + case VG4: + return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC; + case RGB1: + case RGB2: + case RGB3: + return MDP_PIPE_CAP_SCALE; + default: + return 0; + } +} + +enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane); +struct drm_plane *mdp4_plane_init(struct drm_device *dev, + enum mdp4_pipe pipe_id, bool private_plane); + +uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc); +void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config); +void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer); +void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc); +struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, + struct drm_plane *plane, int id, int ovlp_id, + enum mdp4_dma dma_id); + +long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate); +struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev); + +long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate); +struct drm_encoder *mdp4_lcdc_encoder_init(struct 
drm_device *dev, + struct device_node *panel_node); + +struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, + struct device_node *panel_node, struct drm_encoder *encoder); + +#ifdef CONFIG_DRM_MSM_DSI +struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev); +#else +static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev) +{ + return ERR_PTR(-ENODEV); +} +#endif + +#ifdef CONFIG_COMMON_CLK +struct clk *mpd4_lvds_pll_init(struct drm_device *dev); +#else +static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev) +{ + return ERR_PTR(-ENODEV); +} +#endif + +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING +/* bus scaling data is associated with extra pointless platform devices, + * "dtv", etc.. this is a bit of a hack, but we need a way for encoders + * to find their pdata to make the bus-scaling stuff work. + */ +static inline void *mdp4_find_pdata(const char *devname) +{ + struct device *dev; + dev = bus_find_device_by_name(&platform_bus_type, NULL, devname); + return dev ? dev->platform_data : NULL; +} +#endif + +#endif /* __MDP4_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c new file mode 100644 index 000000000000..4a645926edb7 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c @@ -0,0 +1,503 @@ +/* + * Copyright (C) 2014 Red Hat + * Author: Rob Clark + * Author: Vinay Simha + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include + +#include "mdp4_kms.h" + +struct mdp4_lcdc_encoder { + struct drm_encoder base; + struct device_node *panel_node; + struct drm_panel *panel; + struct clk *lcdc_clk; + unsigned long int pixclock; + struct regulator *regs[3]; + bool enabled; + uint32_t bsc; +}; +#define to_mdp4_lcdc_encoder(x) container_of(x, struct mdp4_lcdc_encoder, base) + +static struct mdp4_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING +#include +static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) +{ + struct drm_device *dev = mdp4_lcdc_encoder->base.dev; + struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0"); + + if (!lcdc_pdata) { + dev_err(dev->dev, "could not find lvds pdata\n"); + return; + } + + if (lcdc_pdata->bus_scale_table) { + mdp4_lcdc_encoder->bsc = msm_bus_scale_register_client( + lcdc_pdata->bus_scale_table); + DBG("lvds : bus scale client: %08x", mdp4_lcdc_encoder->bsc); + } +} + +static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) +{ + if (mdp4_lcdc_encoder->bsc) { + msm_bus_scale_unregister_client(mdp4_lcdc_encoder->bsc); + mdp4_lcdc_encoder->bsc = 0; + } +} + +static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) +{ + if (mdp4_lcdc_encoder->bsc) { + DBG("set bus scaling: %d", idx); + msm_bus_scale_client_update_request(mdp4_lcdc_encoder->bsc, idx); + } +} +#else +static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {} +static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {} +static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) {} +#endif + +static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder) +{ + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + bs_fini(mdp4_lcdc_encoder); + drm_encoder_cleanup(encoder); + kfree(mdp4_lcdc_encoder); +} + +static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = { + .destroy = mdp4_lcdc_encoder_destroy, +}; + +/* this should probably be a helper: */ +static struct drm_connector *get_connector(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) + if (connector->encoder == encoder) + return connector; + + return NULL; +} + +static void setup_phy(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_connector *connector = get_connector(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + uint32_t lvds_intf = 0, lvds_phy_cfg0 = 0; + int bpp, nchan, swap; + + if (!connector) + return; + + bpp = 3 * connector->display_info.bpc; + + if (!bpp) + bpp = 18; + + /* TODO, these should come from panel somehow: */ + nchan = 1; + swap = 0; + + switch (bpp) { + case 24: + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x08) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x05) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x04) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x03)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x02) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x01) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x00)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x11) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x10) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0d) | 
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0c)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0b) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0a) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x09)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x15)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x14) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x13) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x12)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(3), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1b) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x17) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x16) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0f)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(3), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0e) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x07) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x06)); + if (nchan == 2) { + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; + } else { + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; + } + break; + + case 18: + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x0a) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x07) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x06) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x05)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x04) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x03) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x02)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x13) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x12) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0f) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0e)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0d) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0c) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x0b)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x17)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x16) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x15) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x14)); + if (nchan == 2) { + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; + } else { + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; + } + lvds_intf |= 
MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT; + break; + + default: + dev_err(dev->dev, "unknown bpp: %d\n", bpp); + return; + } + + switch (nchan) { + case 1: + lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0; + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN | + MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL; + break; + case 2: + lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0 | + MDP4_LVDS_PHY_CFG0_CHANNEL1; + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN; + break; + default: + dev_err(dev->dev, "unknown # of channels: %d\n", nchan); + return; + } + + if (swap) + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP; + + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_ENABLE; + + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_INTF_CTL, lvds_intf); + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG2, 0x30); + + mb(); + udelay(1); + lvds_phy_cfg0 |= MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE; + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0); +} + +static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + uint32_t lcdc_hsync_skew, vsync_period, vsync_len, ctrl_pol; + uint32_t display_v_start, display_v_end; + uint32_t hsync_start_x, hsync_end_x; + + mode = adjusted_mode; + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + mdp4_lcdc_encoder->pixclock = mode->clock * 1000; + + DBG("pixclock=%lu", mdp4_lcdc_encoder->pixclock); + + ctrl_pol = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW; + /* probably need to get DATA_EN polarity from panel.. */ + + lcdc_hsync_skew = 0; /* get this from panel? 
*/ + + hsync_start_x = (mode->htotal - mode->hsync_start); + hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; + + vsync_period = mode->vtotal * mode->htotal; + vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; + display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + lcdc_hsync_skew; + display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + lcdc_hsync_skew - 1; + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_CTRL, + MDP4_LCDC_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) | + MDP4_LCDC_HSYNC_CTRL_PERIOD(mode->htotal)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_PERIOD, vsync_period); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_LEN, vsync_len); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_HCTRL, + MDP4_LCDC_DISPLAY_HCTRL_START(hsync_start_x) | + MDP4_LCDC_DISPLAY_HCTRL_END(hsync_end_x)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VSTART, display_v_start); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VEND, display_v_end); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_BORDER_CLR, 0); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_UNDERFLOW_CLR, + MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY | + MDP4_LCDC_UNDERFLOW_CLR_COLOR(0xff)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_SKEW, lcdc_hsync_skew); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_CTRL_POLARITY, ctrl_pol); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_HCTL, + MDP4_LCDC_ACTIVE_HCTL_START(0) | + MDP4_LCDC_ACTIVE_HCTL_END(0)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VSTART, 0); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VEND, 0); +} + +static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + struct drm_panel *panel; + int i, ret; + + if (WARN_ON(!mdp4_lcdc_encoder->enabled)) + return; + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); + + panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node); + if (panel) { + drm_panel_disable(panel); + drm_panel_unprepare(panel); + } + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
+ */ + mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC); + + clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk); + + for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { + ret = regulator_disable(mdp4_lcdc_encoder->regs[i]); + if (ret) + dev_err(dev->dev, "failed to disable regulator: %d\n", ret); + } + + bs_set(mdp4_lcdc_encoder, 0); + + mdp4_lcdc_encoder->enabled = false; +} + +static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + unsigned long pc = mdp4_lcdc_encoder->pixclock; + struct mdp4_kms *mdp4_kms = get_kms(encoder); + struct drm_panel *panel; + int i, ret; + + if (WARN_ON(mdp4_lcdc_encoder->enabled)) + return; + + /* TODO: hard-coded for 18bpp: */ + mdp4_crtc_set_config(encoder->crtc, + MDP4_DMA_CONFIG_R_BPC(BPC6) | + MDP4_DMA_CONFIG_G_BPC(BPC6) | + MDP4_DMA_CONFIG_B_BPC(BPC6) | + MDP4_DMA_CONFIG_PACK_ALIGN_MSB | + MDP4_DMA_CONFIG_PACK(0x21) | + MDP4_DMA_CONFIG_DEFLKR_EN | + MDP4_DMA_CONFIG_DITHER_EN); + mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0); + + bs_set(mdp4_lcdc_encoder, 1); + + for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { + ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); + if (ret) + dev_err(dev->dev, "failed to enable regulator: %d\n", ret); + } + + DBG("setting lcdc_clk=%lu", pc); + ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc); + if (ret) + dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret); + ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk); + if (ret) + dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret); + + panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node); + if (panel) { + drm_panel_prepare(panel); + drm_panel_enable(panel); + } + + setup_phy(encoder); + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1); + + mdp4_lcdc_encoder->enabled = true; +} + +static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = { + .mode_set = mdp4_lcdc_encoder_mode_set, + .disable = mdp4_lcdc_encoder_disable, + .enable = mdp4_lcdc_encoder_enable, +}; + +long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate) +{ + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate); +} + +/* initialize encoder */ +struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, + struct device_node *panel_node) +{ + struct drm_encoder *encoder = NULL; + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder; + struct regulator *reg; + int ret; + + mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL); + if (!mdp4_lcdc_encoder) { + ret = -ENOMEM; + goto fail; + } + + mdp4_lcdc_encoder->panel_node = panel_node; + + encoder = &mdp4_lcdc_encoder->base; + + drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs, + DRM_MODE_ENCODER_LVDS, NULL); + drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs); + + /* TODO: do we need different pll in other cases? */ + mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev); + if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) { + dev_err(dev->dev, "failed to get lvds_clk\n"); + ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk); + goto fail; + } + + /* TODO: different regulators in other cases? 
*/ + reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v"); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret); + goto fail; + } + mdp4_lcdc_encoder->regs[0] = reg; + + reg = devm_regulator_get(dev->dev, "lvds-pll-vdda"); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret); + goto fail; + } + mdp4_lcdc_encoder->regs[1] = reg; + + reg = devm_regulator_get(dev->dev, "lvds-vdda"); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret); + goto fail; + } + mdp4_lcdc_encoder->regs[2] = reg; + + bs_init(mdp4_lcdc_encoder); + + return encoder; + +fail: + if (encoder) + mdp4_lcdc_encoder_destroy(encoder); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c new file mode 100644 index 000000000000..e3b1c86b7aae --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2014 Red Hat + * Author: Rob Clark + * Author: Vinay Simha + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include + +#include "mdp4_kms.h" + +struct mdp4_lvds_connector { + struct drm_connector base; + struct drm_encoder *encoder; + struct device_node *panel_node; + struct drm_panel *panel; +}; +#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base) + +static enum drm_connector_status mdp4_lvds_connector_detect( + struct drm_connector *connector, bool force) +{ + struct mdp4_lvds_connector *mdp4_lvds_connector = + to_mdp4_lvds_connector(connector); + + if (!mdp4_lvds_connector->panel) + mdp4_lvds_connector->panel = + of_drm_find_panel(mdp4_lvds_connector->panel_node); + + return mdp4_lvds_connector->panel ? 
+ connector_status_connected : + connector_status_disconnected; +} + +static void mdp4_lvds_connector_destroy(struct drm_connector *connector) +{ + struct mdp4_lvds_connector *mdp4_lvds_connector = + to_mdp4_lvds_connector(connector); + + drm_connector_cleanup(connector); + + kfree(mdp4_lvds_connector); +} + +static int mdp4_lvds_connector_get_modes(struct drm_connector *connector) +{ + struct mdp4_lvds_connector *mdp4_lvds_connector = + to_mdp4_lvds_connector(connector); + struct drm_panel *panel = mdp4_lvds_connector->panel; + int ret = 0; + + if (panel) { + drm_panel_attach(panel, connector); + + ret = panel->funcs->get_modes(panel); + + drm_panel_detach(panel); + } + + return ret; +} + +static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct mdp4_lvds_connector *mdp4_lvds_connector = + to_mdp4_lvds_connector(connector); + struct drm_encoder *encoder = mdp4_lvds_connector->encoder; + long actual, requested; + + requested = 1000 * mode->clock; + actual = mdp4_lcdc_round_pixclk(encoder, requested); + + DBG("requested=%ld, actual=%ld", requested, actual); + + if (actual != requested) + return MODE_CLOCK_RANGE; + + return MODE_OK; +} + +static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { + .detect = mdp4_lvds_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = mdp4_lvds_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = { + .get_modes = mdp4_lvds_connector_get_modes, + .mode_valid = mdp4_lvds_connector_mode_valid, +}; + +/* initialize connector */ +struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, + struct device_node *panel_node, struct drm_encoder *encoder) +{ + struct drm_connector *connector = NULL; + struct mdp4_lvds_connector *mdp4_lvds_connector; + + mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL); + if (!mdp4_lvds_connector) + return ERR_PTR(-ENOMEM); + + mdp4_lvds_connector->encoder = encoder; + mdp4_lvds_connector->panel_node = panel_node; + + connector = &mdp4_lvds_connector->base; + + drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs); + + connector->polled = 0; + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + drm_mode_connector_attach_encoder(connector, encoder); + + return connector; +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c new file mode 100644 index 000000000000..ce4245971673 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2014 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include + +#include "mdp4_kms.h" + +struct mdp4_lvds_pll { + struct clk_hw pll_hw; + struct drm_device *dev; + unsigned long pixclk; +}; +#define to_mdp4_lvds_pll(x) container_of(x, struct mdp4_lvds_pll, pll_hw) + +static struct mdp4_kms *get_kms(struct mdp4_lvds_pll *lvds_pll) +{ + struct msm_drm_private *priv = lvds_pll->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +struct pll_rate { + unsigned long rate; + struct { + uint32_t val; + uint32_t reg; + } conf[32]; +}; + +/* NOTE: keep sorted highest freq to lowest: */ +static const struct pll_rate freqtbl[] = { + { 72000000, { + { 0x8f, REG_MDP4_LVDS_PHY_PLL_CTRL_1 }, + { 0x30, REG_MDP4_LVDS_PHY_PLL_CTRL_2 }, + { 0xc6, REG_MDP4_LVDS_PHY_PLL_CTRL_3 }, + { 0x10, REG_MDP4_LVDS_PHY_PLL_CTRL_5 }, + { 0x07, REG_MDP4_LVDS_PHY_PLL_CTRL_6 }, + { 0x62, REG_MDP4_LVDS_PHY_PLL_CTRL_7 }, + { 0x41, REG_MDP4_LVDS_PHY_PLL_CTRL_8 }, + { 0x0d, REG_MDP4_LVDS_PHY_PLL_CTRL_9 }, + { 0, 0 } } + }, +}; + +static const struct pll_rate *find_rate(unsigned long rate) +{ + int i; + for (i = 1; i < ARRAY_SIZE(freqtbl); i++) + if (rate > freqtbl[i].rate) + return &freqtbl[i-1]; + return &freqtbl[i-1]; +} + +static int mpd4_lvds_pll_enable(struct clk_hw *hw) +{ + struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); + struct mdp4_kms *mdp4_kms = get_kms(lvds_pll); + const struct pll_rate *pll_rate = find_rate(lvds_pll->pixclk); + int i; + + DBG("pixclk=%lu (%lu)", lvds_pll->pixclk, pll_rate->rate); + + if (WARN_ON(!pll_rate)) + return -EINVAL; + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_PHY_RESET, 0x33); + + for (i = 0; pll_rate->conf[i].reg; i++) + mdp4_write(mdp4_kms, pll_rate->conf[i].reg, pll_rate->conf[i].val); + + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x01); + + /* Wait until LVDS PLL is locked and ready */ + while (!mdp4_read(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_LOCKED)) + cpu_relax(); + + return 0; +} + +static void mpd4_lvds_pll_disable(struct clk_hw *hw) +{ + struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); + struct mdp4_kms *mdp4_kms = get_kms(lvds_pll); + + DBG(""); + + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, 0x0); + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x0); +} + +static unsigned long mpd4_lvds_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); + return lvds_pll->pixclk; +} + +static long mpd4_lvds_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + const struct pll_rate *pll_rate = find_rate(rate); + return pll_rate->rate; +} + +static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); + lvds_pll->pixclk = rate; + return 0; +} + + +static const struct clk_ops mpd4_lvds_pll_ops = { + .enable = mpd4_lvds_pll_enable, + .disable = mpd4_lvds_pll_disable, + .recalc_rate = mpd4_lvds_pll_recalc_rate, + .round_rate = mpd4_lvds_pll_round_rate, + .set_rate = mpd4_lvds_pll_set_rate, +}; + +static const char *mpd4_lvds_pll_parents[] = { + "pxo", +}; + +static struct clk_init_data pll_init = { + .name = "mpd4_lvds_pll", + .ops = &mpd4_lvds_pll_ops, + .parent_names = mpd4_lvds_pll_parents, + .num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents), +}; + +struct clk *mpd4_lvds_pll_init(struct drm_device *dev) +{ + struct mdp4_lvds_pll *lvds_pll; + struct clk *clk; + int ret; + + lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL); + if (!lvds_pll) { + ret = -ENOMEM; + goto fail; + } 
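/* The embedded clk_hw below is what ties this PLL into the common clock
 * framework: devm_clk_register() returns a struct clk backed by
 * mpd4_lvds_pll_ops, so the LCDC encoder can simply call clk_set_rate()
 * and clk_prepare_enable() on the handle returned by mpd4_lvds_pll_init(),
 * and the requested rate ends up in lvds_pll->pixclk via
 * mpd4_lvds_pll_set_rate().
 */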
+ + lvds_pll->dev = dev; + + lvds_pll->pll_hw.init = &pll_init; + clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto fail; + } + + return clk; + +fail: + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c new file mode 100644 index 000000000000..7a1ad3af08e3 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c @@ -0,0 +1,419 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "mdp4_kms.h" + +#define DOWN_SCALE_MAX 8 +#define UP_SCALE_MAX 8 + +struct mdp4_plane { + struct drm_plane base; + const char *name; + + enum mdp4_pipe pipe; + + uint32_t caps; + uint32_t nformats; + uint32_t formats[32]; + + bool enabled; +}; +#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base) + +/* MDP format helper functions */ +static inline +enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb) +{ + bool is_tile = false; + + if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE) + is_tile = true; + + if (fb->format->format == DRM_FORMAT_NV12 && is_tile) + return FRAME_TILE_YCBCR_420; + + return FRAME_LINEAR; +} + +static void mdp4_plane_set_scanout(struct drm_plane *plane, + struct drm_framebuffer *fb); +static int mdp4_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); + +static struct mdp4_kms *get_kms(struct drm_plane *plane) +{ + struct msm_drm_private *priv = plane->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +static void mdp4_plane_destroy(struct drm_plane *plane) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + + drm_plane_helper_disable(plane); + drm_plane_cleanup(plane); + + kfree(mdp4_plane); +} + +/* helper to install properties which are common to planes and crtcs */ +static void mdp4_plane_install_properties(struct drm_plane *plane, + struct drm_mode_object *obj) +{ + // XXX +} + +static int mdp4_plane_set_property(struct drm_plane *plane, + struct drm_property *property, uint64_t val) +{ + // XXX + return -EINVAL; +} + +static const struct drm_plane_funcs mdp4_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = mdp4_plane_destroy, + .set_property = mdp4_plane_set_property, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static int mdp4_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + struct mdp4_kms *mdp4_kms = get_kms(plane); + struct msm_kms *kms = &mdp4_kms->base.base; + struct drm_framebuffer *fb = new_state->fb; + + if (!fb) + return 0; 
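/* Pinning is done here rather than in atomic_update:
 * msm_framebuffer_prepare() maps the fb's planes into kms->aspace so the
 * iovas later written by mdp4_plane_set_scanout() remain valid while the
 * framebuffer is being scanned out, and mdp4_plane_cleanup_fb() releases
 * that mapping once the fb is no longer in use.
 */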
+ + DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id); + return msm_framebuffer_prepare(fb, kms->aspace); +} + +static void mdp4_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + struct mdp4_kms *mdp4_kms = get_kms(plane); + struct msm_kms *kms = &mdp4_kms->base.base; + struct drm_framebuffer *fb = old_state->fb; + + if (!fb) + return; + + DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id); + msm_framebuffer_cleanup(fb, kms->aspace); +} + + +static int mdp4_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + return 0; +} + +static void mdp4_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_plane_state *state = plane->state; + int ret; + + ret = mdp4_plane_mode_set(plane, + state->crtc, state->fb, + state->crtc_x, state->crtc_y, + state->crtc_w, state->crtc_h, + state->src_x, state->src_y, + state->src_w, state->src_h); + /* atomic_check should have ensured that this doesn't fail */ + WARN_ON(ret < 0); +} + +static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = { + .prepare_fb = mdp4_plane_prepare_fb, + .cleanup_fb = mdp4_plane_cleanup_fb, + .atomic_check = mdp4_plane_atomic_check, + .atomic_update = mdp4_plane_atomic_update, +}; + +static void mdp4_plane_set_scanout(struct drm_plane *plane, + struct drm_framebuffer *fb) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + struct mdp4_kms *mdp4_kms = get_kms(plane); + struct msm_kms *kms = &mdp4_kms->base.base; + enum mdp4_pipe pipe = mdp4_plane->pipe; + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), + MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | + MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe), + MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) | + MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), + msm_framebuffer_iova(fb, kms->aspace, 0)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe), + msm_framebuffer_iova(fb, kms->aspace, 1)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe), + msm_framebuffer_iova(fb, kms->aspace, 2)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe), + msm_framebuffer_iova(fb, kms->aspace, 3)); + + plane->fb = fb; +} + +static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms, + enum mdp4_pipe pipe, struct csc_cfg *csc) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(csc->matrix); i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_MV(pipe, i), + csc->matrix[i]); + } + + for (i = 0; i < ARRAY_SIZE(csc->post_bias) ; i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_BV(pipe, i), + csc->pre_bias[i]); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_BV(pipe, i), + csc->post_bias[i]); + } + + for (i = 0; i < ARRAY_SIZE(csc->post_clamp) ; i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_LV(pipe, i), + csc->pre_clamp[i]); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_LV(pipe, i), + csc->post_clamp[i]); + } +} + +#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000 + +static int mdp4_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_device *dev = plane->dev; + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + struct mdp4_kms *mdp4_kms = get_kms(plane); + enum mdp4_pipe pipe = mdp4_plane->pipe; + const 
struct mdp_format *format; + uint32_t op_mode = 0; + uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; + uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; + enum mdp4_frame_format frame_type; + + if (!(crtc && fb)) { + DBG("%s: disabled!", mdp4_plane->name); + return 0; + } + + frame_type = mdp4_get_frame_format(fb); + + /* src values are in Q16 fixed point, convert to integer: */ + src_x = src_x >> 16; + src_y = src_y >> 16; + src_w = src_w >> 16; + src_h = src_h >> 16; + + DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name, + fb->base.id, src_x, src_y, src_w, src_h, + crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); + + format = to_mdp_format(msm_framebuffer_format(fb)); + + if (src_w > (crtc_w * DOWN_SCALE_MAX)) { + dev_err(dev->dev, "Width down scaling exceeds limits!\n"); + return -ERANGE; + } + + if (src_h > (crtc_h * DOWN_SCALE_MAX)) { + dev_err(dev->dev, "Height down scaling exceeds limits!\n"); + return -ERANGE; + } + + if (crtc_w > (src_w * UP_SCALE_MAX)) { + dev_err(dev->dev, "Width up scaling exceeds limits!\n"); + return -ERANGE; + } + + if (crtc_h > (src_h * UP_SCALE_MAX)) { + dev_err(dev->dev, "Height up scaling exceeds limits!\n"); + return -ERANGE; + } + + if (src_w != crtc_w) { + uint32_t sel_unit = SCALE_FIR; + op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN; + + if (MDP_FORMAT_IS_YUV(format)) { + if (crtc_w > src_w) + sel_unit = SCALE_PIXEL_RPT; + else if (crtc_w <= (src_w / 4)) + sel_unit = SCALE_MN_PHASE; + + op_mode |= MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(sel_unit); + phasex_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT, + src_w, crtc_w); + } + } + + if (src_h != crtc_h) { + uint32_t sel_unit = SCALE_FIR; + op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN; + + if (MDP_FORMAT_IS_YUV(format)) { + + if (crtc_h > src_h) + sel_unit = SCALE_PIXEL_RPT; + else if (crtc_h <= (src_h / 4)) + sel_unit = SCALE_MN_PHASE; + + op_mode |= MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(sel_unit); + phasey_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT, + src_h, crtc_h); + } + } + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe), + MDP4_PIPE_SRC_SIZE_WIDTH(src_w) | + MDP4_PIPE_SRC_SIZE_HEIGHT(src_h)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe), + MDP4_PIPE_SRC_XY_X(src_x) | + MDP4_PIPE_SRC_XY_Y(src_y)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe), + MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) | + MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), + MDP4_PIPE_DST_XY_X(crtc_x) | + MDP4_PIPE_DST_XY_Y(crtc_y)); + + mdp4_plane_set_scanout(plane, fb); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), + MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | + MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | + MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | + MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | + COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) | + MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | + MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | + MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) | + MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) | + MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) | + COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe), + MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | + MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | + MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | + MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); + + if (MDP_FORMAT_IS_YUV(format)) { + struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB); + + op_mode |= 
MDP4_PIPE_OP_MODE_SRC_YCBCR; + op_mode |= MDP4_PIPE_OP_MODE_CSC_EN; + mdp4_write_csc_config(mdp4_kms, pipe, csc); + } + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); + + if (frame_type != FRAME_LINEAR) + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SSTILE_FRAME_SIZE(pipe), + MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(src_w) | + MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(src_h)); + + return 0; +} + +static const char *pipe_names[] = { + "VG1", "VG2", + "RGB1", "RGB2", "RGB3", + "VG3", "VG4", +}; + +enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + return mdp4_plane->pipe; +} + +/* initialize plane */ +struct drm_plane *mdp4_plane_init(struct drm_device *dev, + enum mdp4_pipe pipe_id, bool private_plane) +{ + struct drm_plane *plane = NULL; + struct mdp4_plane *mdp4_plane; + int ret; + enum drm_plane_type type; + + mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL); + if (!mdp4_plane) { + ret = -ENOMEM; + goto fail; + } + + plane = &mdp4_plane->base; + + mdp4_plane->pipe = pipe_id; + mdp4_plane->name = pipe_names[pipe_id]; + mdp4_plane->caps = mdp4_pipe_caps(pipe_id); + + mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats, + ARRAY_SIZE(mdp4_plane->formats), + !pipe_supports_yuv(mdp4_plane->caps)); + + type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; + ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs, + mdp4_plane->formats, mdp4_plane->nformats, + NULL, type, NULL); + if (ret) + goto fail; + + drm_plane_helper_add(plane, &mdp4_plane_helper_funcs); + + mdp4_plane_install_properties(plane, &plane->base); + + return plane; + +fail: + if (plane) + mdp4_plane_destroy(plane); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h new file mode 100644 index 000000000000..d9c10e02ee41 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h @@ -0,0 +1,1968 @@ +#ifndef MDP5_XML +#define MDP5_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27) + +Copyright (C) 2013-2017 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum mdp5_intf_type { + INTF_DISABLED = 0, + INTF_DSI = 1, + INTF_HDMI = 3, + INTF_LCDC = 5, + INTF_eDP = 9, + INTF_VIRTUAL = 100, + INTF_WB = 101, +}; + +enum mdp5_intfnum { + NO_INTF = 0, + INTF0 = 1, + INTF1 = 2, + INTF2 = 3, + INTF3 = 4, +}; + +enum mdp5_pipe { + SSPP_NONE = 0, + SSPP_VIG0 = 1, + SSPP_VIG1 = 2, + SSPP_VIG2 = 3, + SSPP_RGB0 = 4, + SSPP_RGB1 = 5, + SSPP_RGB2 = 6, + SSPP_DMA0 = 7, + SSPP_DMA1 = 8, + SSPP_VIG3 = 9, + SSPP_RGB3 = 10, + SSPP_CURSOR0 = 11, + SSPP_CURSOR1 = 12, +}; + +enum mdp5_ctl_mode { + MODE_NONE = 0, + MODE_WB_0_BLOCK = 1, + MODE_WB_1_BLOCK = 2, + MODE_WB_0_LINE = 3, + MODE_WB_1_LINE = 4, + MODE_WB_2_LINE = 5, +}; + +enum mdp5_pack_3d { + PACK_3D_FRAME_INT = 0, + PACK_3D_H_ROW_INT = 1, + PACK_3D_V_ROW_INT = 2, + PACK_3D_COL_INT = 3, +}; + +enum mdp5_scale_filter { + SCALE_FILTER_NEAREST = 0, + SCALE_FILTER_BIL = 1, + SCALE_FILTER_PCMN = 2, + SCALE_FILTER_CA = 3, +}; + +enum mdp5_pipe_bwc { + BWC_LOSSLESS = 0, + BWC_Q_HIGH = 1, + BWC_Q_MED = 2, +}; + +enum mdp5_cursor_format { + CURSOR_FMT_ARGB8888 = 0, + CURSOR_FMT_ARGB1555 = 2, + CURSOR_FMT_ARGB4444 = 4, +}; + +enum mdp5_cursor_alpha { + CURSOR_ALPHA_CONST = 0, + CURSOR_ALPHA_PER_PIXEL = 2, +}; + +enum mdp5_igc_type { + IGC_VIG = 0, + IGC_RGB = 1, + IGC_DMA = 2, + IGC_DSPP = 3, +}; + +enum mdp5_data_format { + DATA_FORMAT_RGB = 0, + DATA_FORMAT_YUV = 1, +}; + +enum mdp5_block_size { + BLOCK_SIZE_64 = 0, + BLOCK_SIZE_128 = 1, +}; + +enum mdp5_rotate_mode { + ROTATE_0 = 0, + ROTATE_90 = 1, +}; + +enum mdp5_chroma_downsample_method { + DS_MTHD_NO_PIXEL_DROP = 0, + DS_MTHD_PIXEL_DROP = 1, +}; + +#define MDP5_IRQ_WB_0_DONE 0x00000001 +#define MDP5_IRQ_WB_1_DONE 0x00000002 +#define MDP5_IRQ_WB_2_DONE 0x00000010 +#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100 +#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200 +#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400 +#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800 +#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000 +#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000 +#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000 +#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000 +#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000 +#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000 +#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000 +#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000 +#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000 +#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000 +#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000 +#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000 +#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000 +#define MDP5_IRQ_INTF0_VSYNC 0x02000000 +#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000 +#define MDP5_IRQ_INTF1_VSYNC 0x08000000 +#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000 +#define MDP5_IRQ_INTF2_VSYNC 0x20000000 +#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000 +#define MDP5_IRQ_INTF3_VSYNC 0x80000000 +#define REG_MDSS_HW_VERSION 0x00000000 +#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff +#define MDSS_HW_VERSION_STEP__SHIFT 0 +static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val) +{ + return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK; +} +#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000 +#define MDSS_HW_VERSION_MINOR__SHIFT 16 +static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val) +{ + return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK; +} +#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000 +#define MDSS_HW_VERSION_MAJOR__SHIFT 28 +static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val) +{ + return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & 
MDSS_HW_VERSION_MAJOR__MASK; +} + +#define REG_MDSS_HW_INTR_STATUS 0x00000010 +#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001 +#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010 +#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020 +#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100 +#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000 + +#define REG_MDP5_HW_VERSION 0x00000000 +#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff +#define MDP5_HW_VERSION_STEP__SHIFT 0 +static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val) +{ + return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK; +} +#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000 +#define MDP5_HW_VERSION_MINOR__SHIFT 16 +static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val) +{ + return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK; +} +#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000 +#define MDP5_HW_VERSION_MAJOR__SHIFT 28 +static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val) +{ + return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK; +} + +#define REG_MDP5_DISP_INTF_SEL 0x00000004 +#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff +#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val) +{ + return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK; +} +#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 +#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val) +{ + return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK; +} +#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 +#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val) +{ + return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK; +} +#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000 +#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val) +{ + return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK; +} + +#define REG_MDP5_INTR_EN 0x00000010 + +#define REG_MDP5_INTR_STATUS 0x00000014 + +#define REG_MDP5_INTR_CLEAR 0x00000018 + +#define REG_MDP5_HIST_INTR_EN 0x0000001c + +#define REG_MDP5_HIST_INTR_STATUS 0x00000020 + +#define REG_MDP5_HIST_INTR_CLEAR 0x00000024 + +#define REG_MDP5_SPARE_0 0x00000028 +#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001 + +static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; } + +static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; } +#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff +#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 +static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; +} +#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 +#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 +static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; +} +#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 +#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 +static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; +} + +static inline uint32_t 
REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; } + +static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; } +#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff +#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 +static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK; +} +#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 +#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 +static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK; +} +#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 +#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 +static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK; +} + +static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) +{ + switch (idx) { + case IGC_VIG: return 0x00000200; + case IGC_RGB: return 0x00000210; + case IGC_DMA: return 0x00000220; + case IGC_DSPP: return 0x00000300; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); } + +static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } +#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff +#define MDP5_IGC_LUT_REG_VAL__SHIFT 0 +static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val) +{ + return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK; +} +#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000 +#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 +#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 +#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 + +#define REG_MDP5_SPLIT_DPL_EN 0x000002f4 + +#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8 +#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 +#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 +#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 +#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 + +#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0 +#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 +#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 +#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 +#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 + +static inline uint32_t __offset_CTL(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->ctl.base[0]); + case 1: return (mdp5_cfg->ctl.base[1]); + case 2: return (mdp5_cfg->ctl.base[2]); + case 3: return (mdp5_cfg->ctl.base[3]); + case 4: return (mdp5_cfg->ctl.base[4]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); } + +static inline uint32_t __offset_LAYER(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000000; + case 1: return 0x00000004; + case 2: return 0x00000008; + case 3: return 0x0000000c; + case 4: return 0x00000010; + case 5: return 0x00000024; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } + +static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { 
return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } +#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007 +#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0 +static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK; +} +#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038 +#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3 +static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK; +} +#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0 +#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6 +static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK; +} +#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00 +#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9 +static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK; +} +#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000 +#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12 +static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK; +} +#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000 +#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15 +static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK; +} +#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000 +#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18 +static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK; +} +#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000 +#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21 +static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK; +} +#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000 +#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000 +#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000 +#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26 +static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK; +} +#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000 +#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29 +static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK; +} + +static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); } +#define MDP5_CTL_OP_MODE__MASK 0x0000000f +#define MDP5_CTL_OP_MODE__SHIFT 0 +static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val) +{ + return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK; +} +#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070 +#define MDP5_CTL_OP_INTF_NUM__SHIFT 4 +static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val) +{ + return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK; +} +#define MDP5_CTL_OP_CMD_MODE 0x00020000 +#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000 +#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000 +#define MDP5_CTL_OP_PACK_3D__SHIFT 20 +static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val) +{ + return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK; +} + +static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 
0x00000018 + __offset_CTL(i0); } +#define MDP5_CTL_FLUSH_VIG0 0x00000001 +#define MDP5_CTL_FLUSH_VIG1 0x00000002 +#define MDP5_CTL_FLUSH_VIG2 0x00000004 +#define MDP5_CTL_FLUSH_RGB0 0x00000008 +#define MDP5_CTL_FLUSH_RGB1 0x00000010 +#define MDP5_CTL_FLUSH_RGB2 0x00000020 +#define MDP5_CTL_FLUSH_LM0 0x00000040 +#define MDP5_CTL_FLUSH_LM1 0x00000080 +#define MDP5_CTL_FLUSH_LM2 0x00000100 +#define MDP5_CTL_FLUSH_LM3 0x00000200 +#define MDP5_CTL_FLUSH_LM4 0x00000400 +#define MDP5_CTL_FLUSH_DMA0 0x00000800 +#define MDP5_CTL_FLUSH_DMA1 0x00001000 +#define MDP5_CTL_FLUSH_DSPP0 0x00002000 +#define MDP5_CTL_FLUSH_DSPP1 0x00004000 +#define MDP5_CTL_FLUSH_DSPP2 0x00008000 +#define MDP5_CTL_FLUSH_WB 0x00010000 +#define MDP5_CTL_FLUSH_CTL 0x00020000 +#define MDP5_CTL_FLUSH_VIG3 0x00040000 +#define MDP5_CTL_FLUSH_RGB3 0x00080000 +#define MDP5_CTL_FLUSH_LM5 0x00100000 +#define MDP5_CTL_FLUSH_DSPP3 0x00200000 +#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000 +#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000 +#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000 +#define MDP5_CTL_FLUSH_TIMING_3 0x10000000 +#define MDP5_CTL_FLUSH_TIMING_2 0x20000000 +#define MDP5_CTL_FLUSH_TIMING_1 0x40000000 +#define MDP5_CTL_FLUSH_TIMING_0 0x80000000 + +static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); } + +static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); } + +static inline uint32_t __offset_LAYER_EXT(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000040; + case 1: return 0x00000044; + case 2: return 0x00000048; + case 3: return 0x0000004c; + case 4: return 0x00000050; + case 5: return 0x00000054; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); } + +static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); } +#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001 +#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004 +#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010 +#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040 +#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100 +#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400 +#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000 +#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000 +#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000 +#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000 +#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000 +#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20 +static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK; +} +#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000 +#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26 +static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK; +} + +static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) +{ + switch (idx) { + case SSPP_NONE: return (INVALID_IDX(idx)); + case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]); + case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]); + case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]); + case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]); + case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]); + case SSPP_RGB2: return 
(mdp5_cfg->pipe_rgb.base[2]); + case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]); + case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]); + case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]); + case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]); + case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]); + case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_OP_MODE(enum mdp5_pipe i0) { return 0x00000200 + __offset_PIPE(i0); } +#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00080000 +#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 19 +static inline uint32_t MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(enum mdp5_data_format val) +{ + return ((val) << MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK; +} +#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00040000 +#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 18 +static inline uint32_t MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(enum mdp5_data_format val) +{ + return ((val) << MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK; +} +#define MDP5_PIPE_OP_MODE_CSC_1_EN 0x00020000 + +static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(enum mdp5_pipe i0) { return 0x00000320 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(enum mdp5_pipe i0) { return 0x00000324 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(enum mdp5_pipe i0) { return 0x00000328 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(uint32_t val) +{ + 
return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(enum mdp5_pipe i0) { return 0x0000032c + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(enum mdp5_pipe i0) { return 0x00000330 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK; +} +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK; +} +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) 
+ 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } +#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK; +} +#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff +#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); } +#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK; +} +#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff +#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); } +#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000 +#define MDP5_PIPE_SRC_XY_Y__SHIFT 16 +static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK; +} +#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff +#define MDP5_PIPE_SRC_XY_X__SHIFT 0 +static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); } +#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK; +} +#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff +#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 
0x00000010 + __offset_PIPE(i0); } +#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000 +#define MDP5_PIPE_OUT_XY_Y__SHIFT 16 +static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val) +{ + return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK; +} +#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff +#define MDP5_PIPE_OUT_XY_X__SHIFT 0 +static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val) +{ + return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); } +#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff +#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0 +static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK; +} +#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000 +#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16 +static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); } +#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff +#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0 +static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK; +} +#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000 +#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16 +static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); } +#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 +#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 +static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) +{ + return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK; +} +#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c +#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 +static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) +{ + return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK; +} +#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 +#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 +static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) +{ + return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK; +} +#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 +#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 +static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) +{ + return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK; +} +#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100 +#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 
0x00000600 +#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9 +static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK; +} +#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800 +#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000 +#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12 +static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK; +} +#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 +#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 +#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000 +#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19 +static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val) +{ + return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK; +} +#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000 +#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23 +static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) +{ + return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); } +#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff +#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 +static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK; +} +#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00 +#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8 +static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK; +} +#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000 +#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16 +static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK; +} +#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000 +#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24 +static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val) +{ + return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); } +#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001 +#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006 +#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1 +static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val) +{ + return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK; +} +#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000 +#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000 +#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000 +#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000 +#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000 +#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000 +#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000 +#define MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE 0x80000000 + +static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); } + +static inline uint32_t 
REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); } +#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff +#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0 +static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val) +{ + return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK; +} +#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00 +#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8 +static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val) +{ + return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK; +} + +static inline uint32_t __offset_SW_PIX_EXT(enum mdp_component_type idx) +{ + switch (idx) { + case COMP_0: return 0x00000100; + case COMP_1_2: return 0x00000110; + case COMP_3: return 0x00000120; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } + +static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_LR(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } +#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK 0x000000ff +#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT 0 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK 0x0000ff00 +#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT 8 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(int32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK 0x00ff0000 +#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT 16 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK 0xff000000 +#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT 24 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(int32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_TB(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000004 + __offset_PIPE(i0) + 
__offset_SW_PIX_EXT(i1); } +#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK 0x000000ff +#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT 0 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK 0x0000ff00 +#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT 8 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(int32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK 0x00ff0000 +#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT 16 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK 0xff000000 +#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT 24 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(int32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000008 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } +#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK 0x0000ffff +#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT 0 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK; +} +#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK 0xffff0000 +#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT 16 +static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(uint32_t val) +{ + return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); } +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001 +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002 +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300 +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val) +{ + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK; +} +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00 +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val) +{ + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK; +} +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val) +{ + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK; +} +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14 +static inline uint32_t 
MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val) +{ + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK; +} +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val) +{ + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK; +} +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val) +{ + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000218 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x0000021c + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); } + +static inline uint32_t __offset_LM(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->lm.base[0]); + case 1: return (mdp5_cfg->lm.base[1]); + case 2: return (mdp5_cfg->lm.base[2]); + case 3: return (mdp5_cfg->lm.base[3]); + case 4: return (mdp5_cfg->lm.base[4]); + case 5: return (mdp5_cfg->lm.base[5]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); } + +static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); } +#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080 +#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000 + +static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); } +#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK; +} +#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff +#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); } + +static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); } + +static inline uint32_t __offset_BLEND(uint32_t idx) +{ 
+ switch (idx) { + case 0: return 0x00000020; + case 1: return 0x00000050; + case 2: return 0x00000080; + case 3: return 0x000000b0; + case 4: return 0x00000230; + case 5: return 0x00000260; + case 6: return 0x00000290; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); } +#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003 +#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0 +static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val) +{ + return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK; +} +#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004 +#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008 +#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010 +#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020 +#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300 +#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8 +static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val) +{ + return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK; +} +#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400 +#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800 +#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000 +#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000 + +static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); } + +static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_W(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK; +} +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK 0xffff0000 +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT 16 +static inline uint32_t 
MDP5_LM_CURSOR_IMG_SIZE_SRC_H(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK; +} + +static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_SIZE_ROI_W__MASK 0x0000ffff +#define MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_W(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_W__MASK; +} +#define MDP5_LM_CURSOR_SIZE_ROI_H__MASK 0xffff0000 +#define MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_H(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_H__MASK; +} + +static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_XY_SRC_X__MASK 0x0000ffff +#define MDP5_LM_CURSOR_XY_SRC_X__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_XY_SRC_X(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_XY_SRC_X__SHIFT) & MDP5_LM_CURSOR_XY_SRC_X__MASK; +} +#define MDP5_LM_CURSOR_XY_SRC_Y__MASK 0xffff0000 +#define MDP5_LM_CURSOR_XY_SRC_Y__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_XY_SRC_Y(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_XY_SRC_Y__SHIFT) & MDP5_LM_CURSOR_XY_SRC_Y__MASK; +} + +static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); } +#define MDP5_LM_CURSOR_STRIDE_STRIDE__MASK 0x0000ffff +#define MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_STRIDE_STRIDE(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT) & MDP5_LM_CURSOR_STRIDE_STRIDE__MASK; +} + +static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); } +#define MDP5_LM_CURSOR_FORMAT_FORMAT__MASK 0x00000007 +#define MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_FORMAT_FORMAT(enum mdp5_cursor_format val) +{ + return ((val) << MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT) & MDP5_LM_CURSOR_FORMAT_FORMAT__MASK; +} + +static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); } + +static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_START_XY_X_START__MASK 0x0000ffff +#define MDP5_LM_CURSOR_START_XY_X_START__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_START_XY_X_START(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_START_XY_X_START__SHIFT) & MDP5_LM_CURSOR_START_XY_X_START__MASK; +} +#define MDP5_LM_CURSOR_START_XY_Y_START__MASK 0xffff0000 +#define MDP5_LM_CURSOR_START_XY_Y_START__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_START_XY_Y_START(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_START_XY_Y_START__SHIFT) & MDP5_LM_CURSOR_START_XY_Y_START__MASK; +} + +static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN 0x00000001 +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK 0x00000006 +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT 1 +static inline uint32_t MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(enum mdp5_cursor_alpha val) +{ + return ((val) << MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT) & MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK; +} +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN 0x00000008 + +static inline uint32_t 
REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); } + +static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); } + +static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); } + +static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); } + +static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); } + +static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); } + +static inline uint32_t __offset_DSPP(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->dspp.base[0]); + case 1: return (mdp5_cfg->dspp.base[1]); + case 2: return (mdp5_cfg->dspp.base[2]); + case 3: return (mdp5_cfg->dspp.base[3]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); } + +static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); } +#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001 +#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e +#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1 +static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val) +{ + return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK; +} +#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010 +#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100 +#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000 +#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000 +#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000 +#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000 +#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000 +#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000 + +static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); } + +static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); } + +static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); } + +static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); } + +static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); } + +static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); } + +static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); } + +static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); } + +static inline uint32_t __offset_PP(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->pp.base[0]); + case 1: return (mdp5_cfg->pp.base[1]); + case 2: return (mdp5_cfg->pp.base[2]); + case 3: return (mdp5_cfg->pp.base[3]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); } +#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff +#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0 +static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val) +{ + return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & 
MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK; +} +#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000 +#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000 + +static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); } +#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff +#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0 +static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val) +{ + return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK; +} +#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000 +#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16 +static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val) +{ + return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK; +} + +static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); } +#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff +#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0 +static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val) +{ + return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK; +} +#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000 +#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16 +static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val) +{ + return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK; +} + +static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); } +#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff +#define MDP5_PP_SYNC_THRESH_START__SHIFT 0 +static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val) +{ + return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK; +} +#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000 +#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16 +static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val) +{ + return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK; +} + +static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); } + +static inline uint32_t __offset_WB(uint32_t idx) +{ + switch (idx) { +#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */ + case 0: return (mdp5_cfg->wb.base[0]); + case 1: return (mdp5_cfg->wb.base[1]); + case 2: return 
(mdp5_cfg->wb.base[2]); + case 3: return (mdp5_cfg->wb.base[3]); + case 4: return (mdp5_cfg->wb.base[4]); +#endif + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_WB(uint32_t i0) { return 0x00000000 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST_FORMAT(uint32_t i0) { return 0x00000000 + __offset_WB(i0); } +#define MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK 0x00000003 +#define MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT 0 +static inline uint32_t MDP5_WB_DST_FORMAT_DSTC0_OUT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK; +} +#define MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK 0x0000000c +#define MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT 2 +static inline uint32_t MDP5_WB_DST_FORMAT_DSTC1_OUT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK; +} +#define MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK 0x00000030 +#define MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT 4 +static inline uint32_t MDP5_WB_DST_FORMAT_DSTC2_OUT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK; +} +#define MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK 0x000000c0 +#define MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT 6 +static inline uint32_t MDP5_WB_DST_FORMAT_DSTC3_OUT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK; +} +#define MDP5_WB_DST_FORMAT_DSTC3_EN 0x00000100 +#define MDP5_WB_DST_FORMAT_DST_BPP__MASK 0x00000600 +#define MDP5_WB_DST_FORMAT_DST_BPP__SHIFT 9 +static inline uint32_t MDP5_WB_DST_FORMAT_DST_BPP(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DST_BPP__SHIFT) & MDP5_WB_DST_FORMAT_DST_BPP__MASK; +} +#define MDP5_WB_DST_FORMAT_PACK_COUNT__MASK 0x00003000 +#define MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT 12 +static inline uint32_t MDP5_WB_DST_FORMAT_PACK_COUNT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT) & MDP5_WB_DST_FORMAT_PACK_COUNT__MASK; +} +#define MDP5_WB_DST_FORMAT_DST_ALPHA_X 0x00004000 +#define MDP5_WB_DST_FORMAT_PACK_TIGHT 0x00020000 +#define MDP5_WB_DST_FORMAT_PACK_ALIGN_MSB 0x00040000 +#define MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK 0x00180000 +#define MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT 19 +static inline uint32_t MDP5_WB_DST_FORMAT_WRITE_PLANES(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT) & MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK; +} +#define MDP5_WB_DST_FORMAT_DST_DITHER_EN 0x00400000 +#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK 0x03800000 +#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT 23 +static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK; +} +#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK 0x3c000000 +#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT 26 +static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SITE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK; +} +#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK 0xc0000000 +#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT 30 +static inline uint32_t MDP5_WB_DST_FORMAT_FRAME_FORMAT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT) & MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK; +} + +static inline uint32_t REG_MDP5_WB_DST_OP_MODE(uint32_t i0) { return 0x00000004 + __offset_WB(i0); } +#define MDP5_WB_DST_OP_MODE_BWC_ENC_EN 0x00000001 +#define 
MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK 0x00000006 +#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT 1 +static inline uint32_t MDP5_WB_DST_OP_MODE_BWC_ENC_OP(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT) & MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK; +} +#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK 0x00000010 +#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT 4 +static inline uint32_t MDP5_WB_DST_OP_MODE_BLOCK_SIZE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT) & MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK; +} +#define MDP5_WB_DST_OP_MODE_ROT_MODE__MASK 0x00000020 +#define MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT 5 +static inline uint32_t MDP5_WB_DST_OP_MODE_ROT_MODE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT) & MDP5_WB_DST_OP_MODE_ROT_MODE__MASK; +} +#define MDP5_WB_DST_OP_MODE_ROT_EN 0x00000040 +#define MDP5_WB_DST_OP_MODE_CSC_EN 0x00000100 +#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00000200 +#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 9 +static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK; +} +#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00000400 +#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 10 +static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK; +} +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_EN 0x00000800 +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK 0x00001000 +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT 12 +static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK; +} +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK 0x00002000 +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT 13 +static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK; +} +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK 0x00004000 +#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT 14 +static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD(uint32_t val) +{ + return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK; +} + +static inline uint32_t REG_MDP5_WB_DST_PACK_PATTERN(uint32_t i0) { return 0x00000008 + __offset_WB(i0); } +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK 0x00000003 +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT 0 +static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT0(uint32_t val) +{ + return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK; +} +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK 0x00000300 +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT 8 +static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT1(uint32_t val) +{ + return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK; +} +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK 0x00030000 +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT 16 +static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT2(uint32_t val) 
+{ + return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK; +} +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK 0x03000000 +#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT 24 +static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT3(uint32_t val) +{ + return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK; +} + +static inline uint32_t REG_MDP5_WB_DST0_ADDR(uint32_t i0) { return 0x0000000c + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST1_ADDR(uint32_t i0) { return 0x00000010 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST2_ADDR(uint32_t i0) { return 0x00000014 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST3_ADDR(uint32_t i0) { return 0x00000018 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST_YSTRIDE0(uint32_t i0) { return 0x0000001c + __offset_WB(i0); } +#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK 0x0000ffff +#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT 0 +static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK; +} +#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK 0xffff0000 +#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT 16 +static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK; +} + +static inline uint32_t REG_MDP5_WB_DST_YSTRIDE1(uint32_t i0) { return 0x00000020 + __offset_WB(i0); } +#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK 0x0000ffff +#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT 0 +static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK; +} +#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK 0xffff0000 +#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT 16 +static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE(uint32_t val) +{ + return ((val) << MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK; +} + +static inline uint32_t REG_MDP5_WB_DST_DITHER_BITDEPTH(uint32_t i0) { return 0x00000024 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW0(uint32_t i0) { return 0x00000030 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW1(uint32_t i0) { return 0x00000034 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW2(uint32_t i0) { return 0x00000038 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW3(uint32_t i0) { return 0x0000003c + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_DST_WRITE_CONFIG(uint32_t i0) { return 0x00000048 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_ROTATION_DNSCALER(uint32_t i0) { return 0x00000050 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_0_3(uint32_t i0) { return 0x00000060 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_1_2(uint32_t i0) { return 0x00000064 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_0_3(uint32_t i0) { return 0x00000068 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_1_2(uint32_t i0) { return 0x0000006c + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_OUT_SIZE(uint32_t i0) { return 0x00000074 + __offset_WB(i0); } +#define 
MDP5_WB_OUT_SIZE_DST_W__MASK 0x0000ffff +#define MDP5_WB_OUT_SIZE_DST_W__SHIFT 0 +static inline uint32_t MDP5_WB_OUT_SIZE_DST_W(uint32_t val) +{ + return ((val) << MDP5_WB_OUT_SIZE_DST_W__SHIFT) & MDP5_WB_OUT_SIZE_DST_W__MASK; +} +#define MDP5_WB_OUT_SIZE_DST_H__MASK 0xffff0000 +#define MDP5_WB_OUT_SIZE_DST_H__SHIFT 16 +static inline uint32_t MDP5_WB_OUT_SIZE_DST_H(uint32_t val) +{ + return ((val) << MDP5_WB_OUT_SIZE_DST_H__SHIFT) & MDP5_WB_OUT_SIZE_DST_H__MASK; +} + +static inline uint32_t REG_MDP5_WB_ALPHA_X_VALUE(uint32_t i0) { return 0x00000078 + __offset_WB(i0); } + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_0(uint32_t i0) { return 0x00000260 + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK; +} +#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000 +#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT 16 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_1(uint32_t i0) { return 0x00000264 + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK; +} +#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000 +#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT 16 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_2(uint32_t i0) { return 0x00000268 + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK; +} +#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000 +#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT 16 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_3(uint32_t i0) { return 0x0000026c + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK; +} +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000 +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT 16 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_4(uint32_t i0) { return 0x00000270 + 
__offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK; +} +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK; +} +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS_REG(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS_REG(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK; +} + +static inline uint32_t __offset_INTF(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->intf.base[0]); + case 1: return (mdp5_cfg->intf.base[1]); + case 2: return (mdp5_cfg->intf.base[2]); + case 3: return (mdp5_cfg->intf.base[3]); + case 4: return (mdp5_cfg->intf.base[4]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t 
REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); } +#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff +#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0 +static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val) +{ + return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK; +} +#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000 +#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16 +static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val) +{ + return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK; +} + +static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); } +#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff +#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0 +static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val) +{ + return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK; +} +#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000 + +static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); } +#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff +#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0 +static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val) +{ + return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK; +} + +static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); } +#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff +#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0 +static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val) +{ + return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK; +} +#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000 +#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16 +static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val) +{ + return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK; +} + +static inline uint32_t 
REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); } +#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val) +{ + return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK; +} +#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK; +} +#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000 + +static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); } +#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001 +#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002 +#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004 + +static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); } + +static inline uint32_t __offset_AD(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->ad.base[0]); + case 1: return (mdp5_cfg->ad.base[1]); + 
default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); } + + +#endif /* MDP5_XML */ 
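The generated accessors above are meant to be used in pairs: a REG_MDP5_* macro resolves the per-instance base address through the mdp5_cfg-driven __offset_*() lookup, and the per-field inline helpers shift and mask a value into its bitfield. As a minimal sketch of that pattern (the wrapper function below is hypothetical and only for illustration; mdp5_write(), struct mdp5_kms and the generated macros come from the driver itself), programming a writeback output size would look roughly like:

static void example_wb_set_out_size(struct mdp5_kms *mdp5_kms, uint32_t wb_id,
				    uint32_t width, uint32_t height)
{
	/* hypothetical helper: pack both fields and write the WB_OUT_SIZE register */
	mdp5_write(mdp5_kms, REG_MDP5_WB_OUT_SIZE(wb_id),
		   MDP5_WB_OUT_SIZE_DST_W(width) |
		   MDP5_WB_OUT_SIZE_DST_H(height));
}

Note that the __offset_*() helpers read the global mdp5_cfg table, so register accesses like this are only valid after mdp5_cfg_init() (added below) has selected a hardware config for the detected MDP revision.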
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c new file mode 100644 index 000000000000..824067d2d427 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -0,0 +1,652 @@ +/* + * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mdp5_kms.h" +#include "mdp5_cfg.h" + +struct mdp5_cfg_handler { + int revision; + struct mdp5_cfg config; +}; + +/* mdp5_cfg must be exposed (used in mdp5.xml.h) */ +const struct mdp5_cfg_hw *mdp5_cfg = NULL; + +const struct mdp5_cfg_hw msm8x74v1_config = { + .name = "msm8x74v1", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + 0, + }, + .smp = { + .mmb_count = 22, + .mmb_size = 4096, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18, + }, + }, + .ctl = { + .count = 5, + .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, + .flush_hw_mask = 0x0003ffff, + }, + .pipe_vig = { + .count = 3, + .base = { 0x01100, 0x01500, 0x01900 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + 0, + }, + .pipe_rgb = { + .count = 3, + .base = { 0x01d00, 0x02100, 0x02500 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + 0, + }, + .pipe_dma = { + .count = 2, + .base = { 0x02900, 0x02d00 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + 0, + }, + .lm = { + .count = 5, + .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + }, + .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 3, + .base = { 0x04500, 0x04900, 0x04d00 }, + }, + .pp = { + .count = 3, + .base = { 0x21a00, 0x21b00, 0x21c00 }, + }, + .intf = { + .base = { 0x21000, 0x21200, 0x21400, 0x21600 }, + .connect = { + [0] = INTF_eDP, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 200000000, +}; + +const struct mdp5_cfg_hw msm8x74v2_config = { + .name = "msm8x74", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + 0, + }, + .smp = { + .mmb_count = 22, + .mmb_size = 4096, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18, + }, + }, + .ctl = { + .count = 5, + .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, + .flush_hw_mask = 0x0003ffff, + }, + .pipe_vig = { + .count = 3, + .base = { 0x01100, 0x01500, 0x01900 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 3, + .base = { 0x01d00, 0x02100, 0x02500 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + 
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 2, + .base = { 0x02900, 0x02d00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 5, + .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + }, + .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 3, + .base = { 0x04500, 0x04900, 0x04d00 }, + }, + .ad = { + .count = 2, + .base = { 0x13000, 0x13200 }, + }, + .pp = { + .count = 3, + .base = { 0x12c00, 0x12d00, 0x12e00 }, + }, + .intf = { + .base = { 0x12400, 0x12600, 0x12800, 0x12a00 }, + .connect = { + [0] = INTF_eDP, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 200000000, +}; + +const struct mdp5_cfg_hw apq8084_config = { + .name = "apq8084", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + MDP_CAP_SRC_SPLIT | + 0, + }, + .smp = { + .mmb_count = 44, + .mmb_size = 8192, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, + [SSPP_VIG2] = 7, [SSPP_VIG3] = 19, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, + [SSPP_RGB2] = 18, [SSPP_RGB3] = 22, + }, + .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */ + .reserved = { + /* Two SMP blocks are statically tied to RGB pipes: */ + [16] = 2, [17] = 2, [18] = 2, [22] = 2, + }, + }, + .ctl = { + .count = 5, + .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, + .flush_hw_mask = 0x003fffff, + }, + .pipe_vig = { + .count = 4, + .base = { 0x01100, 0x01500, 0x01900, 0x01d00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x02100, 0x02500, 0x02900, 0x02d00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 2, + .base = { 0x03100, 0x03500 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 6, + .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 5, .pp = 3, .dspp = 3, + .caps = MDP_LM_CAP_DISPLAY, }, + }, + .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 4, + .base = { 0x05100, 0x05500, 0x05900, 0x05d00 }, + + }, + .ad = { + .count = 3, + .base = { 0x13400, 0x13600, 0x13800 }, + }, + .pp = { + .count = 4, + .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 }, + }, + .intf = { + .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 }, + .connect = { + [0] = INTF_eDP, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 320000000, +}; + +const struct mdp5_cfg_hw msm8x16_config = { + .name = "msm8x16", + .mdp = { + .count = 1, + .base = { 0x0 }, + .caps = MDP_CAP_SMP | + 0, + }, + .smp = { + .mmb_count = 8, + .mmb_size = 8192, + .clients = { + [SSPP_VIG0] = 1, 
[SSPP_DMA0] = 4, + [SSPP_RGB0] = 7, [SSPP_RGB1] = 8, + }, + }, + .ctl = { + .count = 5, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, + .flush_hw_mask = 0x4003ffff, + }, + .pipe_vig = { + .count = 1, + .base = { 0x04000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 2, /* LM0 and LM3 */ + .base = { 0x44000, 0x47000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + }, + .nb_stages = 8, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + + }, + .intf = { + .base = { 0x00000, 0x6a800 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + }, + }, + .max_clk = 320000000, +}; + +const struct mdp5_cfg_hw msm8x94_config = { + .name = "msm8x94", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + MDP_CAP_SRC_SPLIT | + 0, + }, + .smp = { + .mmb_count = 44, + .mmb_size = 8192, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, + [SSPP_VIG2] = 7, [SSPP_VIG3] = 19, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, + [SSPP_RGB2] = 18, [SSPP_RGB3] = 22, + }, + .reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */ + .reserved = { + [1] = 1, [4] = 1, [7] = 1, [19] = 1, + [16] = 5, [17] = 5, [18] = 5, [22] = 5, + }, + }, + .ctl = { + .count = 5, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, + .flush_hw_mask = 0xf0ffffff, + }, + .pipe_vig = { + .count = 4, + .base = { 0x04000, 0x06000, 0x08000, 0x0a000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 2, + .base = { 0x24000, 0x26000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 6, + .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 5, .pp = 3, .dspp = 3, + .caps = MDP_LM_CAP_DISPLAY, }, + }, + .nb_stages = 8, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 4, + .base = { 0x54000, 0x56000, 0x58000, 0x5a000 }, + + }, + .ad = { + .count = 3, + .base = { 0x78000, 0x78800, 0x79000 }, + }, + .pp = { + .count = 4, + .base = { 0x70000, 0x70800, 0x71000, 0x71800 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 400000000, +}; + +const struct mdp5_cfg_hw msm8x96_config = { + .name = "msm8x96", + .mdp = { + .count = 1, + .caps = MDP_CAP_DSC | + MDP_CAP_CDM | + MDP_CAP_SRC_SPLIT | + 0, + }, + .ctl = { + .count = 5, + .base = { 
0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, + .flush_hw_mask = 0xf4ffffff, + }, + .pipe_vig = { + .count = 4, + .base = { 0x04000, 0x06000, 0x08000, 0x0a000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 2, + .base = { 0x24000, 0x26000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 2, + .base = { 0x34000, 0x36000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + + .lm = { + .count = 6, + .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 5, .pp = 3, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY, }, + }, + .nb_stages = 8, + .max_width = 2560, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 2, + .base = { 0x54000, 0x56000 }, + }, + .ad = { + .count = 3, + .base = { 0x78000, 0x78800, 0x79000 }, + }, + .pp = { + .count = 4, + .base = { 0x70000, 0x70800, 0x71000, 0x71800 }, + }, + .cdm = { + .count = 1, + .base = { 0x79200 }, + }, + .dsc = { + .count = 2, + .base = { 0x80000, 0x80400 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 412500000, +}; + +static const struct mdp5_cfg_handler cfg_handlers[] = { + { .revision = 0, .config = { .hw = &msm8x74v1_config } }, + { .revision = 2, .config = { .hw = &msm8x74v2_config } }, + { .revision = 3, .config = { .hw = &apq8084_config } }, + { .revision = 6, .config = { .hw = &msm8x16_config } }, + { .revision = 9, .config = { .hw = &msm8x94_config } }, + { .revision = 7, .config = { .hw = &msm8x96_config } }, +}; + +static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev); + +const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler) +{ + return cfg_handler->config.hw; +} + +struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler) +{ + return &cfg_handler->config; +} + +int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler) +{ + return cfg_handler->revision; +} + +void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler) +{ + kfree(cfg_handler); +} + +struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, + uint32_t major, uint32_t minor) +{ + struct drm_device *dev = mdp5_kms->dev; + struct platform_device *pdev = to_platform_device(dev->dev); + struct mdp5_cfg_handler *cfg_handler; + struct mdp5_cfg_platform *pconfig; + int i, ret = 0; + + cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL); + if (unlikely(!cfg_handler)) { + ret = -ENOMEM; + goto fail; + } + + if (major != 1) { + dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n", + major, minor); + ret = -ENXIO; + goto fail; + } + + /* only after 
mdp5_cfg global pointer's init can we access the hw */ + for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) { + if (cfg_handlers[i].revision != minor) + continue; + mdp5_cfg = cfg_handlers[i].config.hw; + + break; + } + if (unlikely(!mdp5_cfg)) { + dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n", + major, minor); + ret = -ENXIO; + goto fail; + } + + cfg_handler->revision = minor; + cfg_handler->config.hw = mdp5_cfg; + + pconfig = mdp5_get_config(pdev); + memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig)); + + DBG("MDP5: %s hw config selected", mdp5_cfg->name); + + return cfg_handler; + +fail: + if (cfg_handler) + mdp5_cfg_destroy(cfg_handler); + + return NULL; +} + +static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev) +{ + static struct mdp5_cfg_platform config = {}; + + config.iommu = iommu_domain_alloc(&platform_bus_type); + if (config.iommu) { + config.iommu->geometry.aperture_start = 0x1000; + config.iommu->geometry.aperture_end = 0xffffffff; + } + + return &config; +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h new file mode 100644 index 000000000000..75910d0f2f4c --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MDP5_CFG_H__ +#define __MDP5_CFG_H__ + +#include "msm_drv.h" + +/* + * mdp5_cfg + * + * This module configures the dynamic offsets used by mdp5.xml.h + * (initialized in mdp5_cfg.c) + */ +extern const struct mdp5_cfg_hw *mdp5_cfg; + +#define MAX_CTL 8 +#define MAX_BASES 8 +#define MAX_SMP_BLOCKS 44 +#define MAX_CLIENTS 32 + +typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS); + +#define MDP5_SUB_BLOCK_DEFINITION \ + unsigned int count; \ + uint32_t base[MAX_BASES] + +struct mdp5_sub_block { + MDP5_SUB_BLOCK_DEFINITION; +}; + +struct mdp5_lm_instance { + int id; + int pp; + int dspp; + uint32_t caps; +}; + +struct mdp5_lm_block { + MDP5_SUB_BLOCK_DEFINITION; + struct mdp5_lm_instance instances[MAX_BASES]; + uint32_t nb_stages; /* number of stages per blender */ + uint32_t max_width; /* Maximum output resolution */ + uint32_t max_height; +}; + +struct mdp5_pipe_block { + MDP5_SUB_BLOCK_DEFINITION; + uint32_t caps; /* pipe capabilities */ +}; + +struct mdp5_ctl_block { + MDP5_SUB_BLOCK_DEFINITION; + uint32_t flush_hw_mask; /* FLUSH register's hardware mask */ +}; + +struct mdp5_smp_block { + int mmb_count; /* number of SMP MMBs */ + int mmb_size; /* MMB: size in bytes */ + uint32_t clients[MAX_CLIENTS]; /* SMP port allocation /pipe */ + mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */ + uint8_t reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */ +}; + +struct mdp5_mdp_block { + MDP5_SUB_BLOCK_DEFINITION; + uint32_t caps; /* MDP capabilities: MDP_CAP_xxx bits */ +}; + +#define MDP5_INTF_NUM_MAX 5 + +struct mdp5_intf_block { + uint32_t base[MAX_BASES]; + u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */ +}; + +struct mdp5_cfg_hw { + char *name; + + struct mdp5_mdp_block mdp; + struct mdp5_smp_block smp; + struct mdp5_ctl_block ctl; + struct mdp5_pipe_block pipe_vig; + struct mdp5_pipe_block pipe_rgb; + struct mdp5_pipe_block pipe_dma; + struct mdp5_pipe_block pipe_cursor; + struct mdp5_lm_block lm; + struct mdp5_sub_block dspp; + struct mdp5_sub_block ad; + struct mdp5_sub_block pp; + struct mdp5_sub_block dsc; + struct mdp5_sub_block cdm; + struct mdp5_intf_block intf; + + uint32_t max_clk; +}; + +/* platform config data (ie. from DT, or pdata) */ +struct mdp5_cfg_platform { + struct iommu_domain *iommu; +}; + +struct mdp5_cfg { + const struct mdp5_cfg_hw *hw; + struct mdp5_cfg_platform platform; +}; + +struct mdp5_kms; +struct mdp5_cfg_handler; + +const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd); +struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd); +int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd); + +#define mdp5_cfg_intf_is_virtual(intf_type) ({ \ + typeof(intf_type) __val = (intf_type); \ + (__val) >= INTF_VIRTUAL ? true : false; }) + +struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, + uint32_t major, uint32_t minor); +void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd); + +#endif /* __MDP5_CFG_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c new file mode 100644 index 000000000000..1abc7f5c345c --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "mdp5_kms.h" + +static struct mdp5_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING +#include +#include +#include + +static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) +{ + if (mdp5_cmd_enc->bsc) { + DBG("set bus scaling: %d", idx); + /* HACK: scaling down, and then immediately back up + * seems to leave things broken (underflow).. so + * never disable: + */ + idx = 1; + msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx); + } +} +#else +static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {} +#endif + +#define VSYNC_CLK_RATE 19200000 +static int pingpong_tearcheck_setup(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct device *dev = encoder->dev->dev; + u32 total_lines_x100, vclks_line, cfg; + long vsync_clk_speed; + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + int pp_id = mixer->pp; + + if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) { + dev_err(dev, "vsync_clk is not initialized\n"); + return -EINVAL; + } + + total_lines_x100 = mode->vtotal * mode->vrefresh; + if (!total_lines_x100) { + dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n", + __func__, mode->vtotal, mode->vrefresh); + return -EINVAL; + } + + vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE); + if (vsync_clk_speed <= 0) { + dev_err(dev, "vsync_clk round rate failed %ld\n", + vsync_clk_speed); + return -EINVAL; + } + vclks_line = vsync_clk_speed * 100 / total_lines_x100; + + cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN + | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN; + cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line); + + mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg); + mdp5_write(mdp5_kms, + REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0); + mdp5_write(mdp5_kms, + REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay); + mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1); + mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay); + mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id), + MDP5_PP_SYNC_THRESH_START(4) | + MDP5_PP_SYNC_THRESH_CONTINUE(4)); + + return 0; +} + +static int pingpong_tearcheck_enable(struct drm_encoder *encoder) +{ + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + int pp_id = mixer->pp; + int ret; + + ret = clk_set_rate(mdp5_kms->vsync_clk, + clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE)); + if (ret) { + dev_err(encoder->dev->dev, + "vsync_clk clk_set_rate failed, %d\n", ret); + return ret; + } + ret = clk_prepare_enable(mdp5_kms->vsync_clk); + if (ret) { + dev_err(encoder->dev->dev, + "vsync_clk clk_prepare_enable failed, %d\n", ret); + return ret; + } + + mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1); + + return 0; +} + +static void pingpong_tearcheck_disable(struct drm_encoder *encoder) +{ + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + int pp_id = mixer->pp; + + mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0); + 
clk_disable_unprepare(mdp5_kms->vsync_clk); +} + +void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + mode = adjusted_mode; + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + pingpong_tearcheck_setup(encoder, mode); + mdp5_crtc_set_pipeline(encoder->crtc); +} + +void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); + struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; + struct mdp5_interface *intf = mdp5_cmd_enc->intf; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); + + if (WARN_ON(!mdp5_cmd_enc->enabled)) + return; + + pingpong_tearcheck_disable(encoder); + + mdp5_ctl_set_encoder_state(ctl, pipeline, false); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + + bs_set(mdp5_cmd_enc, 0); + + mdp5_cmd_enc->enabled = false; +} + +void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); + struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; + struct mdp5_interface *intf = mdp5_cmd_enc->intf; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); + + if (WARN_ON(mdp5_cmd_enc->enabled)) + return; + + bs_set(mdp5_cmd_enc, 1); + if (pingpong_tearcheck_enable(encoder)) + return; + + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + + mdp5_ctl_set_encoder_state(ctl, pipeline, true); + + mdp5_cmd_enc->enabled = true; +} + +int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder) +{ + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms; + struct device *dev; + int intf_num; + u32 data = 0; + + if (!encoder || !slave_encoder) + return -EINVAL; + + mdp5_kms = get_kms(encoder); + intf_num = mdp5_cmd_enc->intf->num; + + /* Switch slave encoder's trigger MUX, to use the master's + * start signal for the slave encoder + */ + if (intf_num == 1) + data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; + else if (intf_num == 2) + data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; + else + return -EINVAL; + + /* Smart Panel, Sync mode */ + data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL; + + dev = &mdp5_kms->pdev->dev; + + /* Make sure clocks are on when connectors calling this function. */ + pm_runtime_get_sync(dev); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data); + + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, + MDP5_SPLIT_DPL_LOWER_SMART_PANEL); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); + pm_runtime_put_sync(dev); + + return 0; +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c new file mode 100644 index 000000000000..8c5ed0b59e46 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -0,0 +1,1194 @@ +/* + * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include +#include + +#include "mdp5_kms.h" + +#define CURSOR_WIDTH 64 +#define CURSOR_HEIGHT 64 + +struct mdp5_crtc { + struct drm_crtc base; + int id; + bool enabled; + + spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */ + + /* if there is a pending flip, these will be non-null: */ + struct drm_pending_vblank_event *event; + + /* Bits have been flushed at the last commit, + * used to decide if a vsync has happened since last commit. + */ + u32 flushed_mask; + +#define PENDING_CURSOR 0x1 +#define PENDING_FLIP 0x2 + atomic_t pending; + + /* for unref'ing cursor bo's after scanout completes: */ + struct drm_flip_work unref_cursor_work; + + struct mdp_irq vblank; + struct mdp_irq err; + struct mdp_irq pp_done; + + struct completion pp_completion; + + bool lm_cursor_enabled; + + struct { + /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/ + spinlock_t lock; + + /* current cursor being scanned out: */ + struct drm_gem_object *scanout_bo; + uint64_t iova; + uint32_t width, height; + uint32_t x, y; + } cursor; +}; +#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) + +static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc); + +static struct mdp5_kms *get_kms(struct drm_crtc *crtc) +{ + struct msm_drm_private *priv = crtc->dev->dev_private; + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +static void request_pending(struct drm_crtc *crtc, uint32_t pending) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + + atomic_or(pending, &mdp5_crtc->pending); + mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); +} + +static void request_pp_done_pending(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + reinit_completion(&mdp5_crtc->pp_completion); +} + +static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_ctl *ctl = mdp5_cstate->ctl; + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + + DBG("%s: flush=%08x", crtc->name, flush_mask); + return mdp5_ctl_commit(ctl, pipeline, flush_mask); +} + +/* + * flush updates, to make sure hw is updated to new scanout fb, + * so that we can safely queue unref to current fb (ie. next + * vblank we know hw is done w/ previous scanout_fb). 
+ */ +static u32 crtc_flush_all(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_hw_mixer *mixer, *r_mixer; + struct drm_plane *plane; + uint32_t flush_mask = 0; + + /* this should not happen: */ + if (WARN_ON(!mdp5_cstate->ctl)) + return 0; + + drm_atomic_crtc_for_each_plane(plane, crtc) { + if (!plane->state->visible) + continue; + flush_mask |= mdp5_plane_get_flush(plane); + } + + mixer = mdp5_cstate->pipeline.mixer; + flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm); + + r_mixer = mdp5_cstate->pipeline.r_mixer; + if (r_mixer) + flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); + + return crtc_flush(crtc, flush_mask); +} + +/* if file!=NULL, this is preclose potential cancel-flip path */ +static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_ctl *ctl = mdp5_cstate->ctl; + struct drm_device *dev = crtc->dev; + struct drm_pending_vblank_event *event; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + event = mdp5_crtc->event; + if (event) { + mdp5_crtc->event = NULL; + DBG("%s: send event: %p", crtc->name, event); + drm_crtc_send_vblank_event(crtc, event); + } + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (ctl && !crtc->state->enable) { + /* set STAGE_UNUSED for all layers */ + mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0); + /* XXX: What to do here? */ + /* mdp5_crtc->ctl = NULL; */ + } +} + +static void unref_cursor_worker(struct drm_flip_work *work, void *val) +{ + struct mdp5_crtc *mdp5_crtc = + container_of(work, struct mdp5_crtc, unref_cursor_work); + struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base); + struct msm_kms *kms = &mdp5_kms->base.base; + + msm_gem_put_iova(val, kms->aspace); + drm_gem_object_put_unlocked(val); +} + +static void mdp5_crtc_destroy(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + + drm_crtc_cleanup(crtc); + drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work); + + kfree(mdp5_crtc); +} + +static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage) +{ + switch (stage) { + case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA; + case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA; + case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA; + case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA; + case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA; + case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA; + case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA; + default: + return 0; + } +} + +/* + * left/right pipe offsets for the stage array used in blend_setup() + */ +#define PIPE_LEFT 0 +#define PIPE_RIGHT 1 + +/* + * blend_setup() - blend all the planes of a CRTC + * + * If no base layer is available, border will be enabled as the base layer. + * Otherwise all layers will be blended based on their stage calculated + * in mdp5_crtc_atomic_check. 
+ */ +static void blend_setup(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct drm_plane *plane; + const struct mdp5_cfg_hw *hw_cfg; + struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL}; + const struct mdp_format *format; + struct mdp5_hw_mixer *mixer = pipeline->mixer; + uint32_t lm = mixer->lm; + struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; + uint32_t r_lm = r_mixer ? r_mixer->lm : 0; + struct mdp5_ctl *ctl = mdp5_cstate->ctl; + uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; + unsigned long flags; + enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; + enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; + int i, plane_cnt = 0; + bool bg_alpha_enabled = false; + u32 mixer_op_mode = 0; + u32 val; +#define blender(stage) ((stage) - STAGE0) + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); + + /* ctl could be released already when we are shutting down: */ + /* XXX: Can this happen now? */ + if (!ctl) + goto out; + + /* Collect all plane information */ + drm_atomic_crtc_for_each_plane(plane, crtc) { + enum mdp5_pipe right_pipe; + + if (!plane->state->visible) + continue; + + pstate = to_mdp5_plane_state(plane->state); + pstates[pstate->stage] = pstate; + stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane); + /* + * if we have a right mixer, stage the same pipe as we + * have on the left mixer + */ + if (r_mixer) + r_stage[pstate->stage][PIPE_LEFT] = + mdp5_plane_pipe(plane); + /* + * if we have a right pipe (i.e, the plane comprises of 2 + * hwpipes, then stage the right pipe on the right side of both + * the layer mixers + */ + right_pipe = mdp5_plane_right_pipe(plane); + if (right_pipe) { + stage[pstate->stage][PIPE_RIGHT] = right_pipe; + r_stage[pstate->stage][PIPE_RIGHT] = right_pipe; + } + + plane_cnt++; + } + + if (!pstates[STAGE_BASE]) { + ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; + DBG("Border Color is enabled"); + } else if (plane_cnt) { + format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb)); + + if (format->alpha_enable) + bg_alpha_enabled = true; + } + + /* The reset for blending */ + for (i = STAGE0; i <= STAGE_MAX; i++) { + if (!pstates[i]) + continue; + + format = to_mdp_format( + msm_framebuffer_format(pstates[i]->base.fb)); + plane = pstates[i]->base.plane; + blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | + MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST); + fg_alpha = pstates[i]->alpha; + bg_alpha = 0xFF - pstates[i]->alpha; + + if (!format->alpha_enable && bg_alpha_enabled) + mixer_op_mode = 0; + else + mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i); + + DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha); + + if (format->alpha_enable && pstates[i]->premultiplied) { + blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | + MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL); + if (fg_alpha != 0xff) { + bg_alpha = fg_alpha; + blend_op |= + MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA; + } else { + blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA; + } + } else if (format->alpha_enable) { + blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) | + MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL); + if (fg_alpha != 0xff) { + bg_alpha = fg_alpha; + blend_op |= + 
MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA; + } else { + blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA; + } + } + + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm, + blender(i)), blend_op); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm, + blender(i)), fg_alpha); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm, + blender(i)), bg_alpha); + if (r_mixer) { + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm, + blender(i)), blend_op); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm, + blender(i)), fg_alpha); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm, + blender(i)), bg_alpha); + } + } + + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm)); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), + val | mixer_op_mode); + if (r_mixer) { + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm)); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), + val | mixer_op_mode); + } + + mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt, + ctl_blend_flags); +out: + spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); +} + +static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer; + struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer; + uint32_t lm = mixer->lm; + u32 mixer_width, val; + unsigned long flags; + struct drm_display_mode *mode; + + if (WARN_ON(!crtc->state)) + return; + + mode = &crtc->state->adjusted_mode; + + DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + crtc->name, mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + mixer_width = mode->hdisplay; + if (r_mixer) + mixer_width /= 2; + + spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm), + MDP5_LM_OUT_SIZE_WIDTH(mixer_width) | + MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); + + /* Assign mixer to LEFT side in source split mode */ + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm)); + val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT; + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val); + + if (r_mixer) { + u32 r_lm = r_mixer->lm; + + mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm), + MDP5_LM_OUT_SIZE_WIDTH(mixer_width) | + MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); + + /* Assign mixer to RIGHT side in source split mode */ + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm)); + val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT; + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val); + } + + spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); +} + +static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct device *dev = &mdp5_kms->pdev->dev; + + DBG("%s", crtc->name); + + if (WARN_ON(!mdp5_crtc->enabled)) + return; + + /* Disable/save vblank irq handling before power is disabled */ + drm_crtc_vblank_off(crtc); + + if (mdp5_cstate->cmd_mode) + 
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done); + + mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); + pm_runtime_put_sync(dev); + + mdp5_crtc->enabled = false; +} + +static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct device *dev = &mdp5_kms->pdev->dev; + + DBG("%s", crtc->name); + + if (WARN_ON(mdp5_crtc->enabled)) + return; + + pm_runtime_get_sync(dev); + + if (mdp5_crtc->lm_cursor_enabled) { + /* + * Restore LM cursor state, as it might have been lost + * with suspend: + */ + if (mdp5_crtc->cursor.iova) { + unsigned long flags; + + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); + mdp5_crtc_restore_cursor(crtc); + spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + + mdp5_ctl_set_cursor(mdp5_cstate->ctl, + &mdp5_cstate->pipeline, 0, true); + } else { + mdp5_ctl_set_cursor(mdp5_cstate->ctl, + &mdp5_cstate->pipeline, 0, false); + } + } + + /* Restore vblank irq handling after power is enabled */ + drm_crtc_vblank_on(crtc); + + mdp5_crtc_mode_set_nofb(crtc); + + mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); + + if (mdp5_cstate->cmd_mode) + mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done); + + mdp5_crtc->enabled = true; +} + +int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state, + bool need_right_mixer) +{ + struct mdp5_crtc_state *mdp5_cstate = + to_mdp5_crtc_state(new_crtc_state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + struct mdp5_interface *intf; + bool new_mixer = false; + + new_mixer = !pipeline->mixer; + + if ((need_right_mixer && !pipeline->r_mixer) || + (!need_right_mixer && pipeline->r_mixer)) + new_mixer = true; + + if (new_mixer) { + struct mdp5_hw_mixer *old_mixer = pipeline->mixer; + struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer; + u32 caps; + int ret; + + caps = MDP_LM_CAP_DISPLAY; + if (need_right_mixer) + caps |= MDP_LM_CAP_PAIR; + + ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps, + &pipeline->mixer, need_right_mixer ? + &pipeline->r_mixer : NULL); + if (ret) + return ret; + + mdp5_mixer_release(new_crtc_state->state, old_mixer); + if (old_r_mixer) { + mdp5_mixer_release(new_crtc_state->state, old_r_mixer); + if (!need_right_mixer) + pipeline->r_mixer = NULL; + } + } + + /* + * these should have been already set up in the encoder's atomic + * check (called by drm_atomic_helper_check_modeset) + */ + intf = pipeline->intf; + + mdp5_cstate->err_irqmask = intf2err(intf->num); + mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf); + + if ((intf->type == INTF_DSI) && + (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) { + mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer); + mdp5_cstate->cmd_mode = true; + } else { + mdp5_cstate->pp_done_irqmask = 0; + mdp5_cstate->cmd_mode = false; + } + + return 0; +} + +struct plane_state { + struct drm_plane *plane; + struct mdp5_plane_state *state; +}; + +static int pstate_cmp(const void *a, const void *b) +{ + struct plane_state *pa = (struct plane_state *)a; + struct plane_state *pb = (struct plane_state *)b; + return pa->state->zpos - pb->state->zpos; +} + +/* is there a helper for this? 
*/ +static bool is_fullscreen(struct drm_crtc_state *cstate, + struct drm_plane_state *pstate) +{ + return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) && + ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) && + ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); +} + +static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state, + struct drm_plane_state *bpstate) +{ + struct mdp5_crtc_state *mdp5_cstate = + to_mdp5_crtc_state(new_crtc_state); + + /* + * if we're in source split mode, it's mandatory to have + * border out on the base stage + */ + if (mdp5_cstate->pipeline.r_mixer) + return STAGE0; + + /* if the bottom-most layer is not fullscreen, we need to use + * it for solid-color: + */ + if (!is_fullscreen(new_crtc_state, bpstate)) + return STAGE0; + + return STAGE_BASE; +} + +static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct drm_plane *plane; + struct drm_device *dev = crtc->dev; + struct plane_state pstates[STAGE_MAX + 1]; + const struct mdp5_cfg_hw *hw_cfg; + const struct drm_plane_state *pstate; + const struct drm_display_mode *mode = &state->adjusted_mode; + bool cursor_plane = false; + bool need_right_mixer = false; + int cnt = 0, i; + int ret; + enum mdp_mixer_stage_id start; + + DBG("%s: check", crtc->name); + + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + if (!pstate->visible) + continue; + + pstates[cnt].plane = plane; + pstates[cnt].state = to_mdp5_plane_state(pstate); + + /* + * if any plane on this crtc uses 2 hwpipes, then we need + * the crtc to have a right hwmixer. + */ + if (pstates[cnt].state->r_hwpipe) + need_right_mixer = true; + cnt++; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + cursor_plane = true; + } + + /* bail out early if there aren't any planes */ + if (!cnt) + return 0; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + /* + * we need a right hwmixer if the mode's width is greater than a single + * LM's max width + */ + if (mode->hdisplay > hw_cfg->lm.max_width) + need_right_mixer = true; + + ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer); + if (ret) { + dev_err(dev->dev, "couldn't assign mixers %d\n", ret); + return ret; + } + + /* assign a stage based on sorted zpos property */ + sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); + + /* trigger a warning if cursor isn't the highest zorder */ + WARN_ON(cursor_plane && + (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR)); + + start = get_start_stage(crtc, state, &pstates[0].state->base); + + /* verify that there are not too many planes attached to crtc + * and that we don't have conflicting mixer stages: + */ + if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) { + dev_err(dev->dev, "too many planes! 
cnt=%d, start stage=%d\n", + cnt, start); + return -EINVAL; + } + + for (i = 0; i < cnt; i++) { + if (cursor_plane && (i == (cnt - 1))) + pstates[i].state->stage = hw_cfg->lm.nb_stages; + else + pstates[i].state->stage = start + i; + DBG("%s: assign pipe %s on stage=%d", crtc->name, + pstates[i].plane->name, + pstates[i].state->stage); + } + + return 0; +} + +static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + DBG("%s: begin", crtc->name); +} + +static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct drm_device *dev = crtc->dev; + unsigned long flags; + + DBG("%s: event: %p", crtc->name, crtc->state->event); + + WARN_ON(mdp5_crtc->event); + + spin_lock_irqsave(&dev->event_lock, flags); + mdp5_crtc->event = crtc->state->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* + * If no CTL has been allocated in mdp5_crtc_atomic_check(), + * it means we are trying to flush a CRTC whose state is disabled: + * nothing else needs to be done. + */ + /* XXX: Can this happen now ? */ + if (unlikely(!mdp5_cstate->ctl)) + return; + + blend_setup(crtc); + + /* PP_DONE irq is only used by command mode for now. + * It is better to request pending before FLUSH and START trigger + * to make sure no pp_done irq missed. + * This is safe because no pp_done will happen before SW trigger + * in command mode. + */ + if (mdp5_cstate->cmd_mode) + request_pp_done_pending(crtc); + + mdp5_crtc->flushed_mask = crtc_flush_all(crtc); + + /* XXX are we leaking out state here? */ + mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask; + mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask; + mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask; + + request_pending(crtc, PENDING_FLIP); +} + +static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + uint32_t xres = crtc->mode.hdisplay; + uint32_t yres = crtc->mode.vdisplay; + + /* + * Cursor Region Of Interest (ROI) is a plane read from cursor + * buffer to render. The ROI region is determined by the visibility of + * the cursor point. In the default Cursor image the cursor point will + * be at the top left of the cursor image, unless it is specified + * otherwise using hotspot feature. + * + * If the cursor point reaches the right (xres - x < cursor.width) or + * bottom (yres - y < cursor.height) boundary of the screen, then ROI + * width and ROI height need to be evaluated to crop the cursor image + * accordingly. 
+ * (xres-x) will be new cursor width when x > (xres - cursor.width) + * (yres-y) will be new cursor height when y > (yres - cursor.height) + */ + *roi_w = min(mdp5_crtc->cursor.width, xres - + mdp5_crtc->cursor.x); + *roi_h = min(mdp5_crtc->cursor.height, yres - + mdp5_crtc->cursor.y); +} + +static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; + uint32_t blendcfg, stride; + uint32_t x, y, width, height; + uint32_t roi_w, roi_h; + int lm; + + assert_spin_locked(&mdp5_crtc->cursor.lock); + + lm = mdp5_cstate->pipeline.mixer->lm; + + x = mdp5_crtc->cursor.x; + y = mdp5_crtc->cursor.y; + width = mdp5_crtc->cursor.width; + height = mdp5_crtc->cursor.height; + + stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0); + + get_roi(crtc, &roi_w, &roi_h); + + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), + MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm), + MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | + MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), + MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | + MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm), + MDP5_LM_CURSOR_START_XY_Y_START(y) | + MDP5_LM_CURSOR_START_XY_X_START(x)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), + mdp5_crtc->cursor.iova); + + blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; + blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); +} + +static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file, uint32_t handle, + uint32_t width, uint32_t height) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + struct drm_device *dev = crtc->dev; + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct platform_device *pdev = mdp5_kms->pdev; + struct msm_kms *kms = &mdp5_kms->base.base; + struct drm_gem_object *cursor_bo, *old_bo = NULL; + struct mdp5_ctl *ctl; + int ret; + uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); + bool cursor_enable = true; + unsigned long flags; + + if (!mdp5_crtc->lm_cursor_enabled) { + dev_warn(dev->dev, + "cursor_set is deprecated with cursor planes\n"); + return -EINVAL; + } + + if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { + dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height); + return -EINVAL; + } + + ctl = mdp5_cstate->ctl; + if (!ctl) + return -EINVAL; + + /* don't support LM cursors when we have source split enabled */ + if (mdp5_cstate->pipeline.r_mixer) + return -EINVAL; + + if (!handle) { + DBG("Cursor off"); + cursor_enable = false; + mdp5_crtc->cursor.iova = 0; + pm_runtime_get_sync(&pdev->dev); + goto set_cursor; + } + + cursor_bo = drm_gem_object_lookup(file, handle); + if (!cursor_bo) + return -ENOENT; + + ret = msm_gem_get_iova(cursor_bo, kms->aspace, + &mdp5_crtc->cursor.iova); + if (ret) + return -EINVAL; + + pm_runtime_get_sync(&pdev->dev); + + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); + old_bo = mdp5_crtc->cursor.scanout_bo; + + mdp5_crtc->cursor.scanout_bo = cursor_bo; 
+ mdp5_crtc->cursor.width = width; + mdp5_crtc->cursor.height = height; + + mdp5_crtc_restore_cursor(crtc); + + spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + +set_cursor: + ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); + if (ret) { + dev_err(dev->dev, "failed to %sable cursor: %d\n", + cursor_enable ? "en" : "dis", ret); + goto end; + } + + crtc_flush(crtc, flush_mask); + +end: + pm_runtime_put_sync(&pdev->dev); + if (old_bo) { + drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); + /* enable vblank to complete cursor work: */ + request_pending(crtc, PENDING_CURSOR); + } + return ret; +} + +static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); + struct drm_device *dev = crtc->dev; + uint32_t roi_w; + uint32_t roi_h; + unsigned long flags; + + if (!mdp5_crtc->lm_cursor_enabled) { + dev_warn(dev->dev, + "cursor_move is deprecated with cursor planes\n"); + return -EINVAL; + } + + /* don't support LM cursors when we have source split enabled */ + if (mdp5_cstate->pipeline.r_mixer) + return -EINVAL; + + /* In case the CRTC is disabled, just drop the cursor update */ + if (unlikely(!crtc->state->enable)) + return 0; + + mdp5_crtc->cursor.x = x = max(x, 0); + mdp5_crtc->cursor.y = y = max(y, 0); + + get_roi(crtc, &roi_w, &roi_h); + + pm_runtime_get_sync(&mdp5_kms->pdev->dev); + + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); + mdp5_crtc_restore_cursor(crtc); + spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + + crtc_flush(crtc, flush_mask); + + pm_runtime_put_sync(&mdp5_kms->pdev->dev); + + return 0; +} + +static void +mdp5_crtc_atomic_print_state(struct drm_printer *p, + const struct drm_crtc_state *state) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + struct mdp5_kms *mdp5_kms = get_kms(state->crtc); + + if (WARN_ON(!pipeline)) + return; + + drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ? + pipeline->mixer->name : "(null)"); + + if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT) + drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ? 
+ pipeline->r_mixer->name : "(null)"); +} + +static void mdp5_crtc_reset(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate; + + if (crtc->state) { + __drm_atomic_helper_crtc_destroy_state(crtc->state); + kfree(to_mdp5_crtc_state(crtc->state)); + } + + mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL); + + if (mdp5_cstate) { + mdp5_cstate->base.crtc = crtc; + crtc->state = &mdp5_cstate->base; + } +} + +static struct drm_crtc_state * +mdp5_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate; + + if (WARN_ON(!crtc->state)) + return NULL; + + mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state), + sizeof(*mdp5_cstate), GFP_KERNEL); + if (!mdp5_cstate) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base); + + return &mdp5_cstate->base; +} + +static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(state); + + kfree(mdp5_cstate); +} + +static const struct drm_crtc_funcs mdp5_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = mdp5_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = mdp5_crtc_reset, + .atomic_duplicate_state = mdp5_crtc_duplicate_state, + .atomic_destroy_state = mdp5_crtc_destroy_state, + .cursor_set = mdp5_crtc_cursor_set, + .cursor_move = mdp5_crtc_cursor_move, + .atomic_print_state = mdp5_crtc_atomic_print_state, +}; + +static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { + .mode_set_nofb = mdp5_crtc_mode_set_nofb, + .atomic_check = mdp5_crtc_atomic_check, + .atomic_begin = mdp5_crtc_atomic_begin, + .atomic_flush = mdp5_crtc_atomic_flush, + .atomic_enable = mdp5_crtc_atomic_enable, + .atomic_disable = mdp5_crtc_atomic_disable, +}; + +static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank); + struct drm_crtc *crtc = &mdp5_crtc->base; + struct msm_drm_private *priv = crtc->dev->dev_private; + unsigned pending; + + mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank); + + pending = atomic_xchg(&mdp5_crtc->pending, 0); + + if (pending & PENDING_FLIP) { + complete_flip(crtc, NULL); + } + + if (pending & PENDING_CURSOR) + drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq); +} + +static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err); + + DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus); +} + +static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, + pp_done); + + complete(&mdp5_crtc->pp_completion); +} + +static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + int ret; + + ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, + msecs_to_jiffies(50)); + if (ret == 0) + dev_warn(dev->dev, "pp done time out, lm=%d\n", + mdp5_cstate->pipeline.mixer->lm); +} + +static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_ctl *ctl = mdp5_cstate->ctl; 
+ int ret; + + /* Should not call this function if crtc is disabled. */ + if (!ctl) + return; + + ret = drm_crtc_vblank_get(crtc); + if (ret) + return; + + ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, + ((mdp5_ctl_get_commit_status(ctl) & + mdp5_crtc->flushed_mask) == 0), + msecs_to_jiffies(50)); + if (ret <= 0) + dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id); + + mdp5_crtc->flushed_mask = 0; + + drm_crtc_vblank_put(crtc); +} + +uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + return mdp5_crtc->vblank.irqmask; +} + +void mdp5_crtc_set_pipeline(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + + /* should this be done elsewhere ? */ + mdp_irq_update(&mdp5_kms->base); + + mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline); +} + +struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + + return mdp5_cstate->ctl; +} + +struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate; + + if (WARN_ON(!crtc)) + return ERR_PTR(-EINVAL); + + mdp5_cstate = to_mdp5_crtc_state(crtc->state); + + return WARN_ON(!mdp5_cstate->pipeline.mixer) ? + ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer; +} + +struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate; + + if (WARN_ON(!crtc)) + return ERR_PTR(-EINVAL); + + mdp5_cstate = to_mdp5_crtc_state(crtc->state); + + return &mdp5_cstate->pipeline; +} + +void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + + if (mdp5_cstate->cmd_mode) + mdp5_crtc_wait_for_pp_done(crtc); + else + mdp5_crtc_wait_for_flush_done(crtc); +} + +/* initialize crtc */ +struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, + struct drm_plane *plane, + struct drm_plane *cursor_plane, int id) +{ + struct drm_crtc *crtc = NULL; + struct mdp5_crtc *mdp5_crtc; + + mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL); + if (!mdp5_crtc) + return ERR_PTR(-ENOMEM); + + crtc = &mdp5_crtc->base; + + mdp5_crtc->id = id; + + spin_lock_init(&mdp5_crtc->lm_lock); + spin_lock_init(&mdp5_crtc->cursor.lock); + init_completion(&mdp5_crtc->pp_completion); + + mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; + mdp5_crtc->err.irq = mdp5_crtc_err_irq; + mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq; + + mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true; + + drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, + &mdp5_crtc_funcs, NULL); + + drm_flip_work_init(&mdp5_crtc->unref_cursor_work, + "unref cursor", unref_cursor_worker); + + drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); + plane->crtc = crtc; + + return crtc; +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c new file mode 100644 index 000000000000..439e0a300e25 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c @@ -0,0 +1,779 @@ +/* + * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mdp5_kms.h" +#include "mdp5_ctl.h" + +/* + * CTL - MDP Control Pool Manager + * + * Controls are shared between all display interfaces. + * + * They are intended to be used for data path configuration. + * The top level register programming describes the complete data path for + * a specific data path ID - REG_MDP5_CTL_*(, ...) + * + * Hardware capabilities determine the number of concurrent data paths + * + * In certain use cases (high-resolution dual pipe), one single CTL can be + * shared across multiple CRTCs. + */ + +#define CTL_STAT_BUSY 0x1 +#define CTL_STAT_BOOKED 0x2 + +struct mdp5_ctl { + struct mdp5_ctl_manager *ctlm; + + u32 id; + + /* CTL status bitmask */ + u32 status; + + bool encoder_enabled; + uint32_t start_mask; + + /* REG_MDP5_CTL_*() registers access info + lock: */ + spinlock_t hw_lock; + u32 reg_offset; + + /* when do CTL registers need to be flushed? (mask of trigger bits) */ + u32 pending_ctl_trigger; + + bool cursor_on; + + /* True if the current CTL has FLUSH bits pending for single FLUSH. */ + bool flush_pending; + + struct mdp5_ctl *pair; /* Paired CTL to be flushed together */ +}; + +struct mdp5_ctl_manager { + struct drm_device *dev; + + /* number of CTL / Layer Mixers in this hw config: */ + u32 nlm; + u32 nctl; + + /* to filter out non-present bits in the current hardware config */ + u32 flush_hw_mask; + + /* status for single FLUSH */ + bool single_flush_supported; + u32 single_flush_pending_mask; + + /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ + spinlock_t pool_lock; + struct mdp5_ctl ctls[MAX_CTL]; +}; + +static inline +struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr) +{ + struct msm_drm_private *priv = ctl_mgr->dev->dev_private; + + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +static inline +void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data) +{ + struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); + + (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ + mdp5_write(mdp5_kms, reg, data); +} + +static inline +u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) +{ + struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); + + (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ + return mdp5_read(mdp5_kms, reg); +} + +static void set_display_intf(struct mdp5_kms *mdp5_kms, + struct mdp5_interface *intf) +{ + unsigned long flags; + u32 intf_sel; + + spin_lock_irqsave(&mdp5_kms->resource_lock, flags); + intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); + + switch (intf->num) { + case 0: + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type); + break; + case 1: + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type); + break; + case 2: + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type); + break; + case 3: + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type); + break; + default: + BUG(); + break; + } + + mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); + spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); +} + +static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) +{ + unsigned long flags; + struct mdp5_interface *intf = 
pipeline->intf; + u32 ctl_op = 0; + + if (!mdp5_cfg_intf_is_virtual(intf->type)) + ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num); + + switch (intf->type) { + case INTF_DSI: + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + ctl_op |= MDP5_CTL_OP_CMD_MODE; + break; + + case INTF_WB: + if (intf->mode == MDP5_INTF_WB_MODE_LINE) + ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE); + break; + + default: + break; + } + + if (pipeline->r_mixer) + ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE | + MDP5_CTL_OP_PACK_3D(1); + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op); + spin_unlock_irqrestore(&ctl->hw_lock, flags); +} + +int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + struct mdp5_interface *intf = pipeline->intf; + struct mdp5_hw_mixer *mixer = pipeline->mixer; + struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; + + ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) | + mdp_ctl_flush_mask_encoder(intf); + if (r_mixer) + ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); + + /* Virtual interfaces need not set a display intf (e.g.: Writeback) */ + if (!mdp5_cfg_intf_is_virtual(intf->type)) + set_display_intf(mdp5_kms, intf); + + set_ctl_op(ctl, pipeline); + + return 0; +} + +static bool start_signal_needed(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline) +{ + struct mdp5_interface *intf = pipeline->intf; + + if (!ctl->encoder_enabled || ctl->start_mask != 0) + return false; + + switch (intf->type) { + case INTF_WB: + return true; + case INTF_DSI: + return intf->mode == MDP5_INTF_DSI_MODE_COMMAND; + default: + return false; + } +} + +/* + * send_start_signal() - Overlay Processor Start Signal + * + * For a given control operation (display pipeline), a START signal needs to be + * executed in order to kick off operation and activate all layers. + * e.g.: DSI command mode, Writeback + */ +static void send_start_signal(struct mdp5_ctl *ctl) +{ + unsigned long flags; + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1); + spin_unlock_irqrestore(&ctl->hw_lock, flags); +} + +static void refill_start_mask(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline) +{ + struct mdp5_interface *intf = pipeline->intf; + struct mdp5_hw_mixer *mixer = pipeline->mixer; + struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; + + ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm); + if (r_mixer) + ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); + + /* + * Writeback encoder needs to program & flush + * address registers for each page flip.. + */ + if (intf->type == INTF_WB) + ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf); +} + +/** + * mdp5_ctl_set_encoder_state() - set the encoder state + * + * @enable: true, when encoder is ready for data streaming; false, otherwise. + * + * Note: + * This encoder state is needed to trigger START signal (data path kickoff). + */ +int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline, + bool enabled) +{ + struct mdp5_interface *intf = pipeline->intf; + + if (WARN_ON(!ctl)) + return -EINVAL; + + ctl->encoder_enabled = enabled; + DBG("intf_%d: %s", intf->num, enabled ? 
"on" : "off"); + + if (start_signal_needed(ctl, pipeline)) { + send_start_signal(ctl); + refill_start_mask(ctl, pipeline); + } + + return 0; +} + +/* + * Note: + * CTL registers need to be flushed after calling this function + * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) + */ +int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + int cursor_id, bool enable) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + unsigned long flags; + u32 blend_cfg; + struct mdp5_hw_mixer *mixer = pipeline->mixer; + + if (unlikely(WARN_ON(!mixer))) { + dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM", + ctl->id); + return -EINVAL; + } + + if (pipeline->r_mixer) { + dev_err(ctl_mgr->dev->dev, "unsupported configuration"); + return -EINVAL; + } + + spin_lock_irqsave(&ctl->hw_lock, flags); + + blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm)); + + if (enable) + blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; + else + blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; + + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); + ctl->cursor_on = enable; + + spin_unlock_irqrestore(&ctl->hw_lock, flags); + + ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); + + return 0; +} + +static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, + enum mdp_mixer_stage_id stage) +{ + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage); + case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage); + case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage); + case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage); + case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage); + case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage); + case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage); + case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); + case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); + case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); + case SSPP_CURSOR0: + case SSPP_CURSOR1: + default: return 0; + } +} + +static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, + enum mdp_mixer_stage_id stage) +{ + if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1)) + return 0; + + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3; + case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3; + case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3; + case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3; + case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3; + case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3; + case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3; + case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; + case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; + case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; + case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage); + case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage); + default: return 0; + } +} + +static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl) +{ + unsigned long flags; + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + int i; + + spin_lock_irqsave(&ctl->hw_lock, flags); + + for (i = 0; i < ctl_mgr->nlm; i++) { + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0); + } + + spin_unlock_irqrestore(&ctl->hw_lock, flags); +} + +#define PIPE_LEFT 0 +#define PIPE_RIGHT 1 +int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + enum mdp5_pipe stage[][MAX_PIPE_STAGE], + enum mdp5_pipe 
r_stage[][MAX_PIPE_STAGE], + u32 stage_cnt, u32 ctl_blend_op_flags) +{ + struct mdp5_hw_mixer *mixer = pipeline->mixer; + struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; + unsigned long flags; + u32 blend_cfg = 0, blend_ext_cfg = 0; + u32 r_blend_cfg = 0, r_blend_ext_cfg = 0; + int i, start_stage; + + mdp5_ctl_reset_blend_regs(ctl); + + if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) { + start_stage = STAGE0; + blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; + if (r_mixer) + r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; + } else { + start_stage = STAGE_BASE; + } + + for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) { + blend_cfg |= + mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i); + blend_ext_cfg |= + mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i); + if (r_mixer) { + r_blend_cfg |= + mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i); + r_blend_ext_cfg |= + mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i); + } + } + + spin_lock_irqsave(&ctl->hw_lock, flags); + if (ctl->cursor_on) + blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; + + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm), + blend_ext_cfg); + if (r_mixer) { + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm), + r_blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm), + r_blend_ext_cfg); + } + spin_unlock_irqrestore(&ctl->hw_lock, flags); + + ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm); + if (r_mixer) + ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm); + + DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm, + blend_cfg, blend_ext_cfg); + if (r_mixer) + DBG("lm%d: blend config = 0x%08x. 
ext_cfg = 0x%08x", + r_mixer->lm, r_blend_cfg, r_blend_ext_cfg); + + return 0; +} + +u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf) +{ + if (intf->type == INTF_WB) + return MDP5_CTL_FLUSH_WB; + + switch (intf->num) { + case 0: return MDP5_CTL_FLUSH_TIMING_0; + case 1: return MDP5_CTL_FLUSH_TIMING_1; + case 2: return MDP5_CTL_FLUSH_TIMING_2; + case 3: return MDP5_CTL_FLUSH_TIMING_3; + default: return 0; + } +} + +u32 mdp_ctl_flush_mask_cursor(int cursor_id) +{ + switch (cursor_id) { + case 0: return MDP5_CTL_FLUSH_CURSOR_0; + case 1: return MDP5_CTL_FLUSH_CURSOR_1; + default: return 0; + } +} + +u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe) +{ + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0; + case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1; + case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2; + case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0; + case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1; + case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2; + case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0; + case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; + case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; + case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; + case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0; + case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1; + default: return 0; + } +} + +u32 mdp_ctl_flush_mask_lm(int lm) +{ + switch (lm) { + case 0: return MDP5_CTL_FLUSH_LM0; + case 1: return MDP5_CTL_FLUSH_LM1; + case 2: return MDP5_CTL_FLUSH_LM2; + case 5: return MDP5_CTL_FLUSH_LM5; + default: return 0; + } +} + +static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + u32 flush_mask) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + u32 sw_mask = 0; +#define BIT_NEEDS_SW_FIX(bit) \ + (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit)) + + /* for some targets, cursor bit is the same as LM bit */ + if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0)) + sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm); + + return sw_mask; +} + +static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, + u32 *flush_id) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + + if (ctl->pair) { + DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask); + ctl->flush_pending = true; + ctl_mgr->single_flush_pending_mask |= (*flush_mask); + *flush_mask = 0; + + if (ctl->pair->flush_pending) { + *flush_id = min_t(u32, ctl->id, ctl->pair->id); + *flush_mask = ctl_mgr->single_flush_pending_mask; + + ctl->flush_pending = false; + ctl->pair->flush_pending = false; + ctl_mgr->single_flush_pending_mask = 0; + + DBG("Single FLUSH mask %x,ID %d", *flush_mask, + *flush_id); + } + } +} + +/** + * mdp5_ctl_commit() - Register Flush + * + * The flush register is used to indicate several registers are all + * programmed, and are safe to update to the back copy of the double + * buffered registers. + * + * Some registers FLUSH bits are shared when the hardware does not have + * dedicated bits for them; handling these is the job of fix_sw_flush(). + * + * CTL registers need to be flushed in some circumstances; if that is the + * case, some trigger bits will be present in both flush mask and + * ctl->pending_ctl_trigger. + * + * Return H/W flushed bit mask. 
+ */ +u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline, + u32 flush_mask) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + unsigned long flags; + u32 flush_id = ctl->id; + u32 curr_ctl_flush_mask; + + ctl->start_mask &= ~flush_mask; + + VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask, + ctl->start_mask, ctl->pending_ctl_trigger); + + if (ctl->pending_ctl_trigger & flush_mask) { + flush_mask |= MDP5_CTL_FLUSH_CTL; + ctl->pending_ctl_trigger = 0; + } + + flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask); + + flush_mask &= ctl_mgr->flush_hw_mask; + + curr_ctl_flush_mask = flush_mask; + + fix_for_single_flush(ctl, &flush_mask, &flush_id); + + if (flush_mask) { + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); + spin_unlock_irqrestore(&ctl->hw_lock, flags); + } + + if (start_signal_needed(ctl, pipeline)) { + send_start_signal(ctl); + refill_start_mask(ctl, pipeline); + } + + return curr_ctl_flush_mask; +} + +u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) +{ + return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id)); +} + +int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) +{ + return WARN_ON(!ctl) ? -EINVAL : ctl->id; +} + +/* + * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH + */ +int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) +{ + struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm; + struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + + /* do nothing silently if hw doesn't support */ + if (!ctl_mgr->single_flush_supported) + return 0; + + if (!enable) { + ctlx->pair = NULL; + ctly->pair = NULL; + mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0); + return 0; + } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { + dev_err(ctl_mgr->dev->dev, "CTLs already paired\n"); + return -EINVAL; + } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) { + dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n"); + return -EINVAL; + } + + ctlx->pair = ctly; + ctly->pair = ctlx; + + mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, + MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); + + return 0; +} + +/* + * mdp5_ctl_request() - CTL allocation + * + * Try to return booked CTL for @intf_num is 1 or 2, unbooked for other INTFs. + * If no CTL is available in preferred category, allocate from the other one. + * + * @return fail if no CTL is available. + */ +struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, + int intf_num) +{ + struct mdp5_ctl *ctl = NULL; + const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED; + u32 match = ((intf_num == 1) || (intf_num == 2)) ? 
CTL_STAT_BOOKED : 0; + unsigned long flags; + int c; + + spin_lock_irqsave(&ctl_mgr->pool_lock, flags); + + /* search the preferred */ + for (c = 0; c < ctl_mgr->nctl; c++) + if ((ctl_mgr->ctls[c].status & checkm) == match) + goto found; + + dev_warn(ctl_mgr->dev->dev, + "fall back to the other CTL category for INTF %d!\n", intf_num); + + match ^= CTL_STAT_BOOKED; + for (c = 0; c < ctl_mgr->nctl; c++) + if ((ctl_mgr->ctls[c].status & checkm) == match) + goto found; + + dev_err(ctl_mgr->dev->dev, "No more CTL available!"); + goto unlock; + +found: + ctl = &ctl_mgr->ctls[c]; + ctl->status |= CTL_STAT_BUSY; + ctl->pending_ctl_trigger = 0; + DBG("CTL %d allocated", ctl->id); + +unlock: + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); + return ctl; +} + +void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr) +{ + unsigned long flags; + int c; + + for (c = 0; c < ctl_mgr->nctl; c++) { + struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0); + spin_unlock_irqrestore(&ctl->hw_lock, flags); + } +} + +void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr) +{ + kfree(ctl_mgr); +} + +struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, + void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd) +{ + struct mdp5_ctl_manager *ctl_mgr; + const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd); + int rev = mdp5_cfg_get_hw_rev(cfg_hnd); + const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; + unsigned long flags; + int c, ret; + + ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL); + if (!ctl_mgr) { + dev_err(dev->dev, "failed to allocate CTL manager\n"); + ret = -ENOMEM; + goto fail; + } + + if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) { + dev_err(dev->dev, "Increase static pool size to at least %d\n", + ctl_cfg->count); + ret = -ENOSPC; + goto fail; + } + + /* initialize the CTL manager: */ + ctl_mgr->dev = dev; + ctl_mgr->nlm = hw_cfg->lm.count; + ctl_mgr->nctl = ctl_cfg->count; + ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask; + spin_lock_init(&ctl_mgr->pool_lock); + + /* initialize each CTL of the pool: */ + spin_lock_irqsave(&ctl_mgr->pool_lock, flags); + for (c = 0; c < ctl_mgr->nctl; c++) { + struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; + + if (WARN_ON(!ctl_cfg->base[c])) { + dev_err(dev->dev, "CTL_%d: base is null!\n", c); + ret = -EINVAL; + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); + goto fail; + } + ctl->ctlm = ctl_mgr; + ctl->id = c; + ctl->reg_offset = ctl_cfg->base[c]; + ctl->status = 0; + spin_lock_init(&ctl->hw_lock); + } + + /* + * In Dual DSI case, CTL0 and CTL1 are always assigned to two DSI + * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when + * only write into CTL0's FLUSH register) to keep two DSI pipes in sync. + * Single FLUSH is supported from hw rev v3.0. + */ + if (rev >= 3) { + ctl_mgr->single_flush_supported = true; + /* Reserve CTL0/1 for INTF1/2 */ + ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED; + ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED; + } + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); + DBG("Pool of %d CTLs created.", ctl_mgr->nctl); + + return ctl_mgr; + +fail: + if (ctl_mgr) + mdp5_ctlm_destroy(ctl_mgr); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h new file mode 100644 index 000000000000..b63120388dc6 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2014 The Linux Foundation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MDP5_CTL_H__ +#define __MDP5_CTL_H__ + +#include "msm_drv.h" + +/* + * CTL Manager prototypes: + * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler, + * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions. + */ +struct mdp5_ctl_manager; +struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, + void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd); +void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm); +void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); + +/* + * CTL prototypes: + * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler, + * which is then used to call the other mdp5_ctl_*(ctl, ...) functions. + */ +struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num); + +int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl); + +struct mdp5_interface; +struct mdp5_pipeline; +int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p); +int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p, + bool enabled); + +int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + int cursor_id, bool enable); +int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); + +#define MAX_PIPE_STAGE 2 + +/* + * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM) + * + * @stage: array to contain the pipe num for each stage + * @stage_cnt: valid stage number in stage array + * @ctl_blend_op_flags: blender operation mode flags + * + * Note: + * CTL registers need to be flushed after calling this function + * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) + */ +#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) +int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + enum mdp5_pipe stage[][MAX_PIPE_STAGE], + enum mdp5_pipe r_stage[][MAX_PIPE_STAGE], + u32 stage_cnt, u32 ctl_blend_op_flags); + +/** + * mdp_ctl_flush_mask...() - Register FLUSH masks + * + * These masks are used to specify which block(s) need to be flushed + * through @flush_mask parameter in mdp5_ctl_commit(.., flush_mask). + */ +u32 mdp_ctl_flush_mask_lm(int lm); +u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe); +u32 mdp_ctl_flush_mask_cursor(int cursor_id); +u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); + +/* @flush_mask: see CTL flush masks definitions below */ +u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + u32 flush_mask); +u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); + + + +#endif /* __MDP5_CTL_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c new file mode 100644 index 000000000000..36ad3cbe5f79 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c @@ -0,0 +1,445 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include + +#include "mdp5_kms.h" + +static struct mdp5_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING +#include +#include +#include +#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ + { \ + .src = MSM_BUS_MASTER_MDP_PORT0, \ + .dst = MSM_BUS_SLAVE_EBI_CH0, \ + .ab = (ab_val), \ + .ib = (ib_val), \ + } + +static struct msm_bus_vectors mdp_bus_vectors[] = { + MDP_BUS_VECTOR_ENTRY(0, 0), + MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), +}; +static struct msm_bus_paths mdp_bus_usecases[] = { { + .num_paths = 1, + .vectors = &mdp_bus_vectors[0], +}, { + .num_paths = 1, + .vectors = &mdp_bus_vectors[1], +} }; +static struct msm_bus_scale_pdata mdp_bus_scale_table = { + .usecase = mdp_bus_usecases, + .num_usecases = ARRAY_SIZE(mdp_bus_usecases), + .name = "mdss_mdp", +}; + +static void bs_init(struct mdp5_encoder *mdp5_encoder) +{ + mdp5_encoder->bsc = msm_bus_scale_register_client( + &mdp_bus_scale_table); + DBG("bus scale client: %08x", mdp5_encoder->bsc); +} + +static void bs_fini(struct mdp5_encoder *mdp5_encoder) +{ + if (mdp5_encoder->bsc) { + msm_bus_scale_unregister_client(mdp5_encoder->bsc); + mdp5_encoder->bsc = 0; + } +} + +static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) +{ + if (mdp5_encoder->bsc) { + DBG("set bus scaling: %d", idx); + /* HACK: scaling down, and then immediately back up + * seems to leave things broken (underflow).. 
so + * never disable: + */ + idx = 1; + msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx); + } +} +#else +static void bs_init(struct mdp5_encoder *mdp5_encoder) {} +static void bs_fini(struct mdp5_encoder *mdp5_encoder) {} +static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {} +#endif + +static void mdp5_encoder_destroy(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + bs_fini(mdp5_encoder); + drm_encoder_cleanup(encoder); + kfree(mdp5_encoder); +} + +static const struct drm_encoder_funcs mdp5_encoder_funcs = { + .destroy = mdp5_encoder_destroy, +}; + +static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; + int intf = mdp5_encoder->intf->num; + uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; + uint32_t display_v_start, display_v_end; + uint32_t hsync_start_x, hsync_end_x; + uint32_t format = 0x2100; + unsigned long flags; + + mode = adjusted_mode; + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + ctrl_pol = 0; + + /* DSI controller cannot handle active-low sync signals. */ + if (mdp5_encoder->intf->type != INTF_DSI) { + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW; + } + /* probably need to get DATA_EN polarity from panel.. */ + + dtv_hsync_skew = 0; /* get this from panel? 
*/ + + /* Get color format from panel, default is 8bpc */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + switch (connector->display_info.bpc) { + case 4: + format |= 0; + break; + case 5: + format |= 0x15; + break; + case 6: + format |= 0x2A; + break; + case 8: + default: + format |= 0x3F; + break; + } + break; + } + } + + hsync_start_x = (mode->htotal - mode->hsync_start); + hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; + + vsync_period = mode->vtotal * mode->htotal; + vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; + display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; + display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; + + /* + * For edp only: + * DISPLAY_V_START = (VBP * HCYCLE) + HBP + * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP + */ + if (mdp5_encoder->intf->type == INTF_eDP) { + display_v_start += mode->htotal - mode->hsync_start; + display_v_end -= mode->hsync_start - mode->hdisplay; + } + + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + + mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), + MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) | + MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal)); + mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period); + mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len); + mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf), + MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) | + MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x)); + mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start); + mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end); + mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0); + mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff); + mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew); + mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol); + mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf), + MDP5_INTF_ACTIVE_HCTL_START(0) | + MDP5_INTF_ACTIVE_HCTL_END(0)); + mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0); + mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0); + mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format); + mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? 
*/ + + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + + mdp5_crtc_set_pipeline(encoder->crtc); +} + +static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct mdp5_ctl *ctl = mdp5_encoder->ctl; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + struct mdp5_interface *intf = mdp5_encoder->intf; + int intfn = mdp5_encoder->intf->num; + unsigned long flags; + + if (WARN_ON(!mdp5_encoder->enabled)) + return; + + mdp5_ctl_set_encoder_state(ctl, pipeline, false); + + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0); + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. + */ + mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf)); + + bs_set(mdp5_encoder, 0); + + mdp5_encoder->enabled = false; +} + +static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct mdp5_ctl *ctl = mdp5_encoder->ctl; + struct mdp5_interface *intf = mdp5_encoder->intf; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); + int intfn = intf->num; + unsigned long flags; + + if (WARN_ON(mdp5_encoder->enabled)) + return; + + bs_set(mdp5_encoder, 1); + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + + mdp5_ctl_set_encoder_state(ctl, pipeline, true); + + mdp5_encoder->enabled = true; +} + +static void mdp5_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = mdp5_encoder->intf; + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode); + else + mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode); +} + +static void mdp5_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = mdp5_encoder->intf; + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_disable(encoder); + else + mdp5_vid_encoder_disable(encoder); +} + +static void mdp5_encoder_enable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = mdp5_encoder->intf; + /* this isn't right I think */ + struct drm_crtc_state *cstate = encoder->crtc->state; + + mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode); + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_enable(encoder); + else + mdp5_vid_encoder_enable(encoder); +} + +static int mdp5_encoder_atomic_check(struct drm_encoder *encoder, + struct 
drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state); + struct mdp5_interface *intf = mdp5_encoder->intf; + struct mdp5_ctl *ctl = mdp5_encoder->ctl; + + mdp5_cstate->ctl = ctl; + mdp5_cstate->pipeline.intf = intf; + + return 0; +} + +static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { + .disable = mdp5_encoder_disable, + .enable = mdp5_encoder_enable, + .atomic_check = mdp5_encoder_atomic_check, +}; + +int mdp5_encoder_get_linecount(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + int intf = mdp5_encoder->intf->num; + + return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf)); +} + +u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + int intf = mdp5_encoder->intf->num; + + return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf)); +} + +int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); + struct mdp5_kms *mdp5_kms; + struct device *dev; + int intf_num; + u32 data = 0; + + if (!encoder || !slave_encoder) + return -EINVAL; + + mdp5_kms = get_kms(encoder); + intf_num = mdp5_encoder->intf->num; + + /* Switch slave encoder's TimingGen Sync mode, + * to use the master's enable signal for the slave encoder. + */ + if (intf_num == 1) + data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC; + else if (intf_num == 2) + data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC; + else + return -EINVAL; + + dev = &mdp5_kms->pdev->dev; + /* Make sure clocks are on when connectors calling this function. */ + pm_runtime_get_sync(dev); + + /* Dumb Panel, Sync mode */ + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); + + mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true); + + pm_runtime_put_sync(dev); + + return 0; +} + +void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = mdp5_encoder->intf; + + /* TODO: Expand this to set writeback modes too */ + if (cmd_mode) { + WARN_ON(intf->type != INTF_DSI); + intf->mode = MDP5_INTF_DSI_MODE_COMMAND; + } else { + if (intf->type == INTF_DSI) + intf->mode = MDP5_INTF_DSI_MODE_VIDEO; + else + intf->mode = MDP5_INTF_MODE_NONE; + } +} + +/* initialize encoder */ +struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, + struct mdp5_interface *intf, + struct mdp5_ctl *ctl) +{ + struct drm_encoder *encoder = NULL; + struct mdp5_encoder *mdp5_encoder; + int enc_type = (intf->type == INTF_DSI) ? 
+ DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS; + int ret; + + mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL); + if (!mdp5_encoder) { + ret = -ENOMEM; + goto fail; + } + + encoder = &mdp5_encoder->base; + mdp5_encoder->ctl = ctl; + mdp5_encoder->intf = intf; + + spin_lock_init(&mdp5_encoder->intf_lock); + + drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL); + + drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); + + bs_init(mdp5_encoder); + + return encoder; + +fail: + if (encoder) + mdp5_encoder_destroy(encoder); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c new file mode 100644 index 000000000000..280e368bc9bb --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include + +#include + +#include "msm_drv.h" +#include "mdp5_kms.h" + +void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask) +{ + mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR, + irqmask ^ (irqmask & old_irqmask)); + mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask); +} + +static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler); + static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1); + extern bool dumpstate; + + DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus); + + if (dumpstate && __ratelimit(&rs)) { + struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev); + drm_state_dump(mdp5_kms->dev, &p); + if (mdp5_kms->smp) + mdp5_smp_dump(mdp5_kms->smp, &p); + } +} + +void mdp5_irq_preinstall(struct msm_kms *kms) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); + mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); + mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); + pm_runtime_put_sync(dev); +} + +int mdp5_irq_postinstall(struct msm_kms *kms) +{ + struct mdp_kms *mdp_kms = to_mdp_kms(kms); + struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); + struct device *dev = &mdp5_kms->pdev->dev; + struct mdp_irq *error_handler = &mdp5_kms->error_handler; + + error_handler->irq = mdp5_irq_error_handler; + error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN | + MDP5_IRQ_INTF1_UNDER_RUN | + MDP5_IRQ_INTF2_UNDER_RUN | + MDP5_IRQ_INTF3_UNDER_RUN; + + pm_runtime_get_sync(dev); + mdp_irq_register(mdp_kms, error_handler); + pm_runtime_put_sync(dev); + + return 0; +} + +void mdp5_irq_uninstall(struct msm_kms *kms) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); + mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); + pm_runtime_put_sync(dev); +} + +irqreturn_t mdp5_irq(struct msm_kms *kms) +{ + struct mdp_kms *mdp_kms = to_mdp_kms(kms); + struct mdp5_kms *mdp5_kms = 
to_mdp5_kms(mdp_kms); + struct drm_device *dev = mdp5_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + unsigned int id; + uint32_t status, enable; + + enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN); + status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable; + mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status); + + VERB("status=%08x", status); + + mdp_dispatch_irqs(mdp_kms, status); + + for (id = 0; id < priv->num_crtcs; id++) + if (status & mdp5_crtc_vblank(priv->crtcs[id])) + drm_handle_vblank(dev, id); + + return IRQ_HANDLED; +} + +int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); + mdp_update_vblank_mask(to_mdp_kms(kms), + mdp5_crtc_vblank(crtc), true); + pm_runtime_put_sync(dev); + + return 0; +} + +void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); + mdp_update_vblank_mask(to_mdp_kms(kms), + mdp5_crtc_vblank(crtc), false); + pm_runtime_put_sync(dev); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c new file mode 100644 index 000000000000..6d8e3a9a6fc0 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -0,0 +1,1067 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include + +#include "msm_drv.h" +#include "msm_gem.h" +#include "msm_mmu.h" +#include "mdp5_kms.h" + +static const char *iommu_ports[] = { + "mdp_0", +}; + +static int mdp5_hw_init(struct msm_kms *kms) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + unsigned long flags; + + pm_runtime_get_sync(dev); + + /* Magic unknown register writes: + * + * W VBIF:0x004 00000001 (mdss_mdp.c:839) + * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839) + * W MDP5:0x2e4 0x55 (mdss_mdp.c:839) + * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839) + * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839) + * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839) + * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839) + * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839) + * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839) + * + * Downstream fbdev driver gets these register offsets/values + * from DT.. not really sure what these registers are or if + * different values for different boards/SoC's, etc. I guess + * they are the golden registers. + * + * Not setting these does not seem to cause any problem. But + * we may be getting lucky with the bootloader initializing + * them for us. OTOH, if we can always count on the bootloader + * setting the golden registers, then perhaps we don't need to + * care. 
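Purely as an illustration of what programming those "golden" settings could look like if this driver ever had to do it itself (it does not in this patch), the MDP5 offset/value pairs quoted in the comment above can be collected into a small table; the loop below is a hypothetical sketch that assumes it runs where mdp5_kms is in scope, and the VBIF entry is omitted because it belongs to a different register block:

	/* hypothetical sketch; offsets/values copied from the comment above */
	static const struct { u32 offset; u32 value; } golden_regs[] = {
		{ 0x2e0, 0x000000e9 }, { 0x2e4, 0x00000055 },
		{ 0x3ac, 0xc0000ccc }, { 0x3b4, 0xc0000ccc },
		{ 0x3bc, 0x00cccccc }, { 0x4a8, 0x0cccc0c0 },
		{ 0x4b0, 0xccccc0c0 }, { 0x4b8, 0xccccc000 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(golden_regs); i++)
		mdp5_write(mdp5_kms, golden_regs[i].offset, golden_regs[i].value);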
+ */ + + spin_lock_irqsave(&mdp5_kms->resource_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); + spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); + + mdp5_ctlm_hw_reset(mdp5_kms->ctlm); + + pm_runtime_put_sync(dev); + + return 0; +} + +struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct msm_kms_state *state = to_kms_state(s); + struct mdp5_state *new_state; + int ret; + + if (state->state) + return state->state; + + ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL); + if (!new_state) + return ERR_PTR(-ENOMEM); + + /* Copy state: */ + new_state->hwpipe = mdp5_kms->state->hwpipe; + new_state->hwmixer = mdp5_kms->state->hwmixer; + if (mdp5_kms->smp) + new_state->smp = mdp5_kms->state->smp; + + state->state = new_state; + + return new_state; +} + +static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + swap(to_kms_state(state)->state, mdp5_kms->state); +} + +static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); + + if (mdp5_kms->smp) + mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp); +} + +static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + + if (mdp5_kms->smp) + mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); + + pm_runtime_put_sync(dev); +} + +static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms, + struct drm_crtc *crtc) +{ + mdp5_crtc_wait_for_commit_done(crtc); +} + +static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, + struct drm_encoder *encoder) +{ + return rate; +} + +static int mdp5_set_split_display(struct msm_kms *kms, + struct drm_encoder *encoder, + struct drm_encoder *slave_encoder, + bool is_cmd_mode) +{ + if (is_cmd_mode) + return mdp5_cmd_encoder_set_split_display(encoder, + slave_encoder); + else + return mdp5_vid_encoder_set_split_display(encoder, + slave_encoder); +} + +static void mdp5_set_encoder_mode(struct msm_kms *kms, + struct drm_encoder *encoder, + bool cmd_mode) +{ + mdp5_encoder_set_intf_mode(encoder, cmd_mode); +} + +static void mdp5_kms_destroy(struct msm_kms *kms) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct msm_gem_address_space *aspace = kms->aspace; + int i; + + for (i = 0; i < mdp5_kms->num_hwmixers; i++) + mdp5_mixer_destroy(mdp5_kms->hwmixers[i]); + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) + mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); + + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_put(aspace); + } +} + +#ifdef CONFIG_DEBUG_FS +static int smp_show(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct msm_drm_private *priv = dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct drm_printer p = drm_seq_file_printer(m); + + if (!mdp5_kms->smp) { + drm_printf(&p, "no SMP pool\n"); + return 0; 
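For readers following the global-state plumbing: mdp5_get_state() above implements a copy-on-first-use scheme behind a modeset lock, and the hwpipe/hwmixer assignment code later in this patch drives it from the atomic-check path. A condensed sketch of that calling pattern (here `s`, `crtc` and `idx` are placeholders for a caller's drm_atomic_state, CRTC and mixer index):

	struct mdp5_state *state = mdp5_get_state(s);

	if (IS_ERR(state))
		return PTR_ERR(state);

	/* only the duplicated copy is modified; it becomes the new global
	 * state when mdp5_swap_state() runs during the commit */
	state->hwmixer.hwmixer_to_crtc[idx] = crtc;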
+ } + + mdp5_smp_dump(mdp5_kms->smp, &p); + + return 0; +} + +static struct drm_info_list mdp5_debugfs_list[] = { + {"smp", smp_show }, +}; + +static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + int ret; + + ret = drm_debugfs_create_files(mdp5_debugfs_list, + ARRAY_SIZE(mdp5_debugfs_list), + minor->debugfs_root, minor); + + if (ret) { + dev_err(dev->dev, "could not install mdp5_debugfs_list\n"); + return ret; + } + + return 0; +} +#endif + +static const struct mdp_kms_funcs kms_funcs = { + .base = { + .hw_init = mdp5_hw_init, + .irq_preinstall = mdp5_irq_preinstall, + .irq_postinstall = mdp5_irq_postinstall, + .irq_uninstall = mdp5_irq_uninstall, + .irq = mdp5_irq, + .enable_vblank = mdp5_enable_vblank, + .disable_vblank = mdp5_disable_vblank, + .swap_state = mdp5_swap_state, + .prepare_commit = mdp5_prepare_commit, + .complete_commit = mdp5_complete_commit, + .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done, + .get_format = mdp_get_format, + .round_pixclk = mdp5_round_pixclk, + .set_split_display = mdp5_set_split_display, + .set_encoder_mode = mdp5_set_encoder_mode, + .destroy = mdp5_kms_destroy, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = mdp5_kms_debugfs_init, +#endif + }, + .set_irqmask = mdp5_set_irqmask, +}; + +int mdp5_disable(struct mdp5_kms *mdp5_kms) +{ + DBG(""); + + mdp5_kms->enable_count--; + WARN_ON(mdp5_kms->enable_count < 0); + + clk_disable_unprepare(mdp5_kms->ahb_clk); + clk_disable_unprepare(mdp5_kms->axi_clk); + clk_disable_unprepare(mdp5_kms->core_clk); + if (mdp5_kms->lut_clk) + clk_disable_unprepare(mdp5_kms->lut_clk); + + return 0; +} + +int mdp5_enable(struct mdp5_kms *mdp5_kms) +{ + DBG(""); + + mdp5_kms->enable_count++; + + clk_prepare_enable(mdp5_kms->ahb_clk); + clk_prepare_enable(mdp5_kms->axi_clk); + clk_prepare_enable(mdp5_kms->core_clk); + if (mdp5_kms->lut_clk) + clk_prepare_enable(mdp5_kms->lut_clk); + + return 0; +} + +static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, + struct mdp5_interface *intf, + struct mdp5_ctl *ctl) +{ + struct drm_device *dev = mdp5_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + struct drm_encoder *encoder; + + encoder = mdp5_encoder_init(dev, intf, ctl); + if (IS_ERR(encoder)) { + dev_err(dev->dev, "failed to construct encoder\n"); + return encoder; + } + + priv->encoders[priv->num_encoders++] = encoder; + + return encoder; +} + +static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num) +{ + const enum mdp5_intf_type *intfs = hw_cfg->intf.connect; + const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect); + int id = 0, i; + + for (i = 0; i < intf_cnt; i++) { + if (intfs[i] == INTF_DSI) { + if (intf_num == i) + return id; + + id++; + } + } + + return -EINVAL; +} + +static int modeset_init_intf(struct mdp5_kms *mdp5_kms, + struct mdp5_interface *intf) +{ + struct drm_device *dev = mdp5_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm; + struct mdp5_ctl *ctl; + struct drm_encoder *encoder; + int ret = 0; + + switch (intf->type) { + case INTF_eDP: + if (!priv->edp) + break; + + ctl = mdp5_ctlm_request(ctlm, intf->num); + if (!ctl) { + ret = -EINVAL; + break; + } + + encoder = construct_encoder(mdp5_kms, intf, ctl); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + break; + } + + ret = msm_edp_modeset_init(priv->edp, dev, encoder); + break; + case INTF_HDMI: + if (!priv->hdmi) + break; + + ctl = mdp5_ctlm_request(ctlm, intf->num); + if 
(!ctl) { + ret = -EINVAL; + break; + } + + encoder = construct_encoder(mdp5_kms, intf, ctl); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + break; + } + + ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); + break; + case INTF_DSI: + { + const struct mdp5_cfg_hw *hw_cfg = + mdp5_cfg_get_hw_config(mdp5_kms->cfg); + int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num); + + if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { + dev_err(dev->dev, "failed to find dsi from intf %d\n", + intf->num); + ret = -EINVAL; + break; + } + + if (!priv->dsi[dsi_id]) + break; + + ctl = mdp5_ctlm_request(ctlm, intf->num); + if (!ctl) { + ret = -EINVAL; + break; + } + + encoder = construct_encoder(mdp5_kms, intf, ctl); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + break; + } + + ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); + break; + } + default: + dev_err(dev->dev, "unknown intf: %d\n", intf->type); + ret = -EINVAL; + break; + } + + return ret; +} + +static int modeset_init(struct mdp5_kms *mdp5_kms) +{ + struct drm_device *dev = mdp5_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + const struct mdp5_cfg_hw *hw_cfg; + unsigned int num_crtcs; + int i, ret, pi = 0, ci = 0; + struct drm_plane *primary[MAX_BASES] = { NULL }; + struct drm_plane *cursor[MAX_BASES] = { NULL }; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + /* + * Construct encoders and modeset initialize connector devices + * for each external display interface. + */ + for (i = 0; i < mdp5_kms->num_intfs; i++) { + ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]); + if (ret) + goto fail; + } + + /* + * We should ideally have less number of encoders (set up by parsing + * the MDP5 interfaces) than the number of layer mixers present in HW, + * but let's be safe here anyway + */ + num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers); + + /* + * Construct planes equaling the number of hw pipes, and CRTCs for the + * N encoders set up by the driver. 
The first N planes become primary + * planes for the CRTCs, with the remainder as overlay planes: + */ + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; + struct drm_plane *plane; + enum drm_plane_type type; + + if (i < num_crtcs) + type = DRM_PLANE_TYPE_PRIMARY; + else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR) + type = DRM_PLANE_TYPE_CURSOR; + else + type = DRM_PLANE_TYPE_OVERLAY; + + plane = mdp5_plane_init(dev, type); + if (IS_ERR(plane)) { + ret = PTR_ERR(plane); + dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret); + goto fail; + } + priv->planes[priv->num_planes++] = plane; + + if (type == DRM_PLANE_TYPE_PRIMARY) + primary[pi++] = plane; + if (type == DRM_PLANE_TYPE_CURSOR) + cursor[ci++] = plane; + } + + for (i = 0; i < num_crtcs; i++) { + struct drm_crtc *crtc; + + crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i); + if (IS_ERR(crtc)) { + ret = PTR_ERR(crtc); + dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); + goto fail; + } + priv->crtcs[priv->num_crtcs++] = crtc; + } + + /* + * Now that we know the number of crtcs we've created, set the possible + * crtcs for the encoders + */ + for (i = 0; i < priv->num_encoders; i++) { + struct drm_encoder *encoder = priv->encoders[i]; + + encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; + } + + return 0; + +fail: + return ret; +} + +static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms, + u32 *major, u32 *minor) +{ + struct device *dev = &mdp5_kms->pdev->dev; + u32 version; + + pm_runtime_get_sync(dev); + version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION); + pm_runtime_put_sync(dev); + + *major = FIELD(version, MDP5_HW_VERSION_MAJOR); + *minor = FIELD(version, MDP5_HW_VERSION_MINOR); + + dev_info(dev, "MDP5 version v%d.%d", *major, *minor); +} + +static int get_clk(struct platform_device *pdev, struct clk **clkp, + const char *name, bool mandatory) +{ + struct device *dev = &pdev->dev; + struct clk *clk = msm_clk_get(pdev, name); + if (IS_ERR(clk) && mandatory) { + dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); + return PTR_ERR(clk); + } + if (IS_ERR(clk)) + DBG("skipping %s", name); + else + *clkp = clk; + + return 0; +} + +static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + drm_for_each_encoder(encoder, dev) + if (encoder->crtc == crtc) + return encoder; + + return NULL; +} + +static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + int line, vsw, vbp, vactive_start, vactive_end, vfp_end; + + crtc = priv->crtcs[pipe]; + if (!crtc) { + DRM_ERROR("Invalid crtc %d\n", pipe); + return false; + } + + encoder = get_encoder_from_crtc(crtc); + if (!encoder) { + DRM_ERROR("no encoder found for crtc %d\n", pipe); + return false; + } + + vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; + vbp = mode->crtc_vtotal - mode->crtc_vsync_end; + + /* + * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at + * the end of VFP. Translate the porch values relative to the line + * counter positions. 
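 *
 * Worked example (an illustration added here, not in the original
 * comment; it assumes standard 1080p@60 CEA timings): crtc_vdisplay=1080,
 * crtc_vsync_start=1084, crtc_vsync_end=1089, crtc_vtotal=1125 gives
 * vsw = 1089 - 1084 = 5 and vbp = 1125 - 1089 = 36, so
 * vactive_start = 5 + 36 + 1 = 42 and vactive_end = 42 + 1080 = 1122.
 * A raw line count of 42 therefore reads back as vpos 0 (first active
 * line), while counts above 1122, i.e. in the front porch, come back
 * as negative vpos values.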
+ */ + + vactive_start = vsw + vbp + 1; + + vactive_end = vactive_start + mode->crtc_vdisplay; + + /* last scan line before VSYNC */ + vfp_end = mode->crtc_vtotal; + + if (stime) + *stime = ktime_get(); + + line = mdp5_encoder_get_linecount(encoder); + + if (line < vactive_start) { + line -= vactive_start; + } else if (line > vactive_end) { + line = line - vfp_end - vactive_start; + } else { + line -= vactive_start; + } + + *vpos = line; + *hpos = 0; + + if (etime) + *etime = ktime_get(); + + return true; +} + +static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + + if (pipe >= priv->num_crtcs) + return 0; + + crtc = priv->crtcs[pipe]; + if (!crtc) + return 0; + + encoder = get_encoder_from_crtc(crtc); + if (!encoder) + return 0; + + return mdp5_encoder_get_framecount(encoder); +} + +struct msm_kms *mdp5_kms_init(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev; + struct mdp5_kms *mdp5_kms; + struct mdp5_cfg *config; + struct msm_kms *kms; + struct msm_gem_address_space *aspace; + int irq, i, ret; + + /* priv->kms would have been populated by the MDP5 driver */ + kms = priv->kms; + if (!kms) + return NULL; + + mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + + mdp_kms_init(&mdp5_kms->base, &kms_funcs); + + pdev = mdp5_kms->pdev; + + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (irq < 0) { + ret = irq; + dev_err(&pdev->dev, "failed to get irq: %d\n", ret); + goto fail; + } + + kms->irq = irq; + + config = mdp5_cfg_get_config(mdp5_kms->cfg); + + /* make sure things are off before attaching iommu (bootloader could + * have left things on, in which case we'll start getting faults if + * we don't disable): + */ + pm_runtime_get_sync(&pdev->dev); + for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { + if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) || + !config->hw->intf.base[i]) + continue; + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); + + mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3); + } + mdelay(16); + + if (config->platform.iommu) { + aspace = msm_gem_address_space_create(&pdev->dev, + config->platform.iommu, "mdp5"); + if (IS_ERR(aspace)) { + ret = PTR_ERR(aspace); + goto fail; + } + + kms->aspace = aspace; + + ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, + ARRAY_SIZE(iommu_ports)); + if (ret) { + dev_err(&pdev->dev, "failed to attach iommu: %d\n", + ret); + goto fail; + } + } else { + dev_info(&pdev->dev, + "no iommu, fallback to phys contig buffers for scanout\n"); + aspace = NULL; + } + + pm_runtime_put_sync(&pdev->dev); + + ret = modeset_init(mdp5_kms); + if (ret) { + dev_err(&pdev->dev, "modeset_init failed: %d\n", ret); + goto fail; + } + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 0xffff; + dev->mode_config.max_height = 0xffff; + + dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; + dev->driver->get_scanout_position = mdp5_get_scanoutpos; + dev->driver->get_vblank_counter = mdp5_get_vblank_counter; + dev->max_vblank_count = 0xffffffff; + dev->vblank_disable_immediate = true; + + return kms; +fail: + if (kms) + mdp5_kms_destroy(kms); + return ERR_PTR(ret); +} + +static void mdp5_destroy(struct platform_device *pdev) +{ + struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); + int i; + + if (mdp5_kms->ctlm) + mdp5_ctlm_destroy(mdp5_kms->ctlm); + if 
(mdp5_kms->smp) + mdp5_smp_destroy(mdp5_kms->smp); + if (mdp5_kms->cfg) + mdp5_cfg_destroy(mdp5_kms->cfg); + + for (i = 0; i < mdp5_kms->num_intfs; i++) + kfree(mdp5_kms->intfs[i]); + + if (mdp5_kms->rpm_enabled) + pm_runtime_disable(&pdev->dev); + + kfree(mdp5_kms->state); +} + +static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt, + const enum mdp5_pipe *pipes, const uint32_t *offsets, + uint32_t caps) +{ + struct drm_device *dev = mdp5_kms->dev; + int i, ret; + + for (i = 0; i < cnt; i++) { + struct mdp5_hw_pipe *hwpipe; + + hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps); + if (IS_ERR(hwpipe)) { + ret = PTR_ERR(hwpipe); + dev_err(dev->dev, "failed to construct pipe for %s (%d)\n", + pipe2name(pipes[i]), ret); + return ret; + } + hwpipe->idx = mdp5_kms->num_hwpipes; + mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe; + } + + return 0; +} + +static int hwpipe_init(struct mdp5_kms *mdp5_kms) +{ + static const enum mdp5_pipe rgb_planes[] = { + SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, + }; + static const enum mdp5_pipe vig_planes[] = { + SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, + }; + static const enum mdp5_pipe dma_planes[] = { + SSPP_DMA0, SSPP_DMA1, + }; + static const enum mdp5_pipe cursor_planes[] = { + SSPP_CURSOR0, SSPP_CURSOR1, + }; + const struct mdp5_cfg_hw *hw_cfg; + int ret; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + /* Construct RGB pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes, + hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps); + if (ret) + return ret; + + /* Construct video (VIG) pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes, + hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps); + if (ret) + return ret; + + /* Construct DMA pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes, + hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps); + if (ret) + return ret; + + /* Construct cursor pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count, + cursor_planes, hw_cfg->pipe_cursor.base, + hw_cfg->pipe_cursor.caps); + if (ret) + return ret; + + return 0; +} + +static int hwmixer_init(struct mdp5_kms *mdp5_kms) +{ + struct drm_device *dev = mdp5_kms->dev; + const struct mdp5_cfg_hw *hw_cfg; + int i, ret; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + for (i = 0; i < hw_cfg->lm.count; i++) { + struct mdp5_hw_mixer *mixer; + + mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]); + if (IS_ERR(mixer)) { + ret = PTR_ERR(mixer); + dev_err(dev->dev, "failed to construct LM%d (%d)\n", + i, ret); + return ret; + } + + mixer->idx = mdp5_kms->num_hwmixers; + mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer; + } + + return 0; +} + +static int interface_init(struct mdp5_kms *mdp5_kms) +{ + struct drm_device *dev = mdp5_kms->dev; + const struct mdp5_cfg_hw *hw_cfg; + const enum mdp5_intf_type *intf_types; + int i; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + intf_types = hw_cfg->intf.connect; + + for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { + struct mdp5_interface *intf; + + if (intf_types[i] == INTF_DISABLED) + continue; + + intf = kzalloc(sizeof(*intf), GFP_KERNEL); + if (!intf) { + dev_err(dev->dev, "failed to construct INTF%d\n", i); + return -ENOMEM; + } + + intf->num = i; + intf->type = intf_types[i]; + intf->mode = MDP5_INTF_MODE_NONE; + intf->idx = mdp5_kms->num_intfs; + mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf; + } + + return 0; +} + +static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) +{ + struct 
msm_drm_private *priv = dev->dev_private; + struct mdp5_kms *mdp5_kms; + struct mdp5_cfg *config; + u32 major, minor; + int ret; + + mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL); + if (!mdp5_kms) { + ret = -ENOMEM; + goto fail; + } + + platform_set_drvdata(pdev, mdp5_kms); + + spin_lock_init(&mdp5_kms->resource_lock); + + mdp5_kms->dev = dev; + mdp5_kms->pdev = pdev; + + drm_modeset_lock_init(&mdp5_kms->state_lock); + mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL); + if (!mdp5_kms->state) { + ret = -ENOMEM; + goto fail; + } + + mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); + if (IS_ERR(mdp5_kms->mmio)) { + ret = PTR_ERR(mdp5_kms->mmio); + goto fail; + } + + /* mandatory clocks: */ + ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true); + if (ret) + goto fail; + ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true); + if (ret) + goto fail; + ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true); + if (ret) + goto fail; + ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true); + if (ret) + goto fail; + + /* optional clocks: */ + get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); + + /* we need to set a default rate before enabling. Set a safe + * rate first, then figure out hw revision, and then set a + * more optimal rate: + */ + clk_set_rate(mdp5_kms->core_clk, 200000000); + + pm_runtime_enable(&pdev->dev); + mdp5_kms->rpm_enabled = true; + + read_mdp_hw_revision(mdp5_kms, &major, &minor); + + mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor); + if (IS_ERR(mdp5_kms->cfg)) { + ret = PTR_ERR(mdp5_kms->cfg); + mdp5_kms->cfg = NULL; + goto fail; + } + + config = mdp5_cfg_get_config(mdp5_kms->cfg); + mdp5_kms->caps = config->hw->mdp.caps; + + /* TODO: compute core clock rate at runtime */ + clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk); + + /* + * Some chipsets have a Shared Memory Pool (SMP), while others + * have dedicated latency buffering per source pipe instead; + * this section initializes the SMP: + */ + if (mdp5_kms->caps & MDP_CAP_SMP) { + mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp); + if (IS_ERR(mdp5_kms->smp)) { + ret = PTR_ERR(mdp5_kms->smp); + mdp5_kms->smp = NULL; + goto fail; + } + } + + mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg); + if (IS_ERR(mdp5_kms->ctlm)) { + ret = PTR_ERR(mdp5_kms->ctlm); + mdp5_kms->ctlm = NULL; + goto fail; + } + + ret = hwpipe_init(mdp5_kms); + if (ret) + goto fail; + + ret = hwmixer_init(mdp5_kms); + if (ret) + goto fail; + + ret = interface_init(mdp5_kms); + if (ret) + goto fail; + + /* set uninit-ed kms */ + priv->kms = &mdp5_kms->base.base; + + return 0; +fail: + mdp5_destroy(pdev); + return ret; +} + +static int mdp5_bind(struct device *dev, struct device *master, void *data) +{ + struct drm_device *ddev = dev_get_drvdata(master); + struct platform_device *pdev = to_platform_device(dev); + + DBG(""); + + return mdp5_init(pdev, ddev); +} + +static void mdp5_unbind(struct device *dev, struct device *master, + void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + mdp5_destroy(pdev); +} + +static const struct component_ops mdp5_ops = { + .bind = mdp5_bind, + .unbind = mdp5_unbind, +}; + +static int mdp5_dev_probe(struct platform_device *pdev) +{ + DBG(""); + return component_add(&pdev->dev, &mdp5_ops); +} + +static int mdp5_dev_remove(struct platform_device *pdev) +{ + DBG(""); + component_del(&pdev->dev, &mdp5_ops); + return 0; +} + +static __maybe_unused int mdp5_runtime_suspend(struct device *dev) +{ + struct platform_device 
*pdev = to_platform_device(dev); + struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); + + DBG(""); + + return mdp5_disable(mdp5_kms); +} + +static __maybe_unused int mdp5_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); + + DBG(""); + + return mdp5_enable(mdp5_kms); +} + +static const struct dev_pm_ops mdp5_pm_ops = { + SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL) +}; + +static const struct of_device_id mdp5_dt_match[] = { + { .compatible = "qcom,mdp5", }, + /* to support downstream DT files */ + { .compatible = "qcom,mdss_mdp", }, + {} +}; +MODULE_DEVICE_TABLE(of, mdp5_dt_match); + +static struct platform_driver mdp5_driver = { + .probe = mdp5_dev_probe, + .remove = mdp5_dev_remove, + .driver = { + .name = "msm_mdp", + .of_match_table = mdp5_dt_match, + .pm = &mdp5_pm_ops, + }, +}; + +void __init msm_mdp_register(void) +{ + DBG(""); + platform_driver_register(&mdp5_driver); +} + +void __exit msm_mdp_unregister(void) +{ + DBG(""); + platform_driver_unregister(&mdp5_driver); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h new file mode 100644 index 000000000000..aeb94aa461b5 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h @@ -0,0 +1,325 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __MDP5_KMS_H__ +#define __MDP5_KMS_H__ + +#include "msm_drv.h" +#include "msm_kms.h" +#include "disp/mdp_kms.h" +#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */ +#include "mdp5.xml.h" +#include "mdp5_pipe.h" +#include "mdp5_mixer.h" +#include "mdp5_ctl.h" +#include "mdp5_smp.h" + +struct mdp5_state; + +struct mdp5_kms { + struct mdp_kms base; + + struct drm_device *dev; + + struct platform_device *pdev; + + unsigned num_hwpipes; + struct mdp5_hw_pipe *hwpipes[SSPP_MAX]; + + unsigned num_hwmixers; + struct mdp5_hw_mixer *hwmixers[8]; + + unsigned num_intfs; + struct mdp5_interface *intfs[5]; + + struct mdp5_cfg_handler *cfg; + uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ + + /** + * Global atomic state. Do not access directly, use mdp5_get_state() + */ + struct mdp5_state *state; + struct drm_modeset_lock state_lock; + + struct mdp5_smp *smp; + struct mdp5_ctl_manager *ctlm; + + /* io/register spaces: */ + void __iomem *mmio; + + struct clk *axi_clk; + struct clk *ahb_clk; + struct clk *core_clk; + struct clk *lut_clk; + struct clk *vsync_clk; + + /* + * lock to protect access to global resources: ie., following register: + * - REG_MDP5_DISP_INTF_SEL + */ + spinlock_t resource_lock; + + bool rpm_enabled; + + struct mdp_irq error_handler; + + int enable_count; +}; +#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) + +/* Global atomic state for tracking resources that are shared across + * multiple kms objects (planes/crtcs/etc). 
+ * + * For atomic updates which require modifying global state, + */ +struct mdp5_state { + struct mdp5_hw_pipe_state hwpipe; + struct mdp5_hw_mixer_state hwmixer; + struct mdp5_smp_state smp; +}; + +struct mdp5_state *__must_check +mdp5_get_state(struct drm_atomic_state *s); + +/* Atomic plane state. Subclasses the base drm_plane_state in order to + * track assigned hwpipe and hw specific state. + */ +struct mdp5_plane_state { + struct drm_plane_state base; + + struct mdp5_hw_pipe *hwpipe; + struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */ + + /* aligned with property */ + uint8_t premultiplied; + uint8_t zpos; + uint8_t alpha; + + /* assigned by crtc blender */ + enum mdp_mixer_stage_id stage; +}; +#define to_mdp5_plane_state(x) \ + container_of(x, struct mdp5_plane_state, base) + +struct mdp5_pipeline { + struct mdp5_interface *intf; + struct mdp5_hw_mixer *mixer; + struct mdp5_hw_mixer *r_mixer; /* right mixer */ +}; + +struct mdp5_crtc_state { + struct drm_crtc_state base; + + struct mdp5_ctl *ctl; + struct mdp5_pipeline pipeline; + + /* these are derivatives of intf/mixer state in mdp5_pipeline */ + u32 vblank_irqmask; + u32 err_irqmask; + u32 pp_done_irqmask; + + bool cmd_mode; +}; +#define to_mdp5_crtc_state(x) \ + container_of(x, struct mdp5_crtc_state, base) + +enum mdp5_intf_mode { + MDP5_INTF_MODE_NONE = 0, + + /* Modes used for DSI interface (INTF_DSI type): */ + MDP5_INTF_DSI_MODE_VIDEO, + MDP5_INTF_DSI_MODE_COMMAND, + + /* Modes used for WB interface (INTF_WB type): */ + MDP5_INTF_WB_MODE_BLOCK, + MDP5_INTF_WB_MODE_LINE, +}; + +struct mdp5_interface { + int idx; + int num; /* display interface number */ + enum mdp5_intf_type type; + enum mdp5_intf_mode mode; +}; + +struct mdp5_encoder { + struct drm_encoder base; + spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ + bool enabled; + uint32_t bsc; + + struct mdp5_interface *intf; + struct mdp5_ctl *ctl; +}; +#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) + +static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) +{ + WARN_ON(mdp5_kms->enable_count <= 0); + msm_writel(data, mdp5_kms->mmio + reg); +} + +static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg) +{ + WARN_ON(mdp5_kms->enable_count <= 0); + return msm_readl(mdp5_kms->mmio + reg); +} + +static inline const char *stage2name(enum mdp_mixer_stage_id stage) +{ + static const char *names[] = { +#define NAME(n) [n] = #n + NAME(STAGE_UNUSED), NAME(STAGE_BASE), + NAME(STAGE0), NAME(STAGE1), NAME(STAGE2), + NAME(STAGE3), NAME(STAGE4), NAME(STAGE6), +#undef NAME + }; + return names[stage]; +} + +static inline const char *pipe2name(enum mdp5_pipe pipe) +{ + static const char *names[] = { +#define NAME(n) [SSPP_ ## n] = #n + NAME(VIG0), NAME(VIG1), NAME(VIG2), + NAME(RGB0), NAME(RGB1), NAME(RGB2), + NAME(DMA0), NAME(DMA1), + NAME(VIG3), NAME(RGB3), + NAME(CURSOR0), NAME(CURSOR1), +#undef NAME + }; + return names[pipe]; +} + +static inline int pipe2nclients(enum mdp5_pipe pipe) +{ + switch (pipe) { + case SSPP_RGB0: + case SSPP_RGB1: + case SSPP_RGB2: + case SSPP_RGB3: + return 1; + default: + return 3; + } +} + +static inline uint32_t intf2err(int intf_num) +{ + switch (intf_num) { + case 0: return MDP5_IRQ_INTF0_UNDER_RUN; + case 1: return MDP5_IRQ_INTF1_UNDER_RUN; + case 2: return MDP5_IRQ_INTF2_UNDER_RUN; + case 3: return MDP5_IRQ_INTF3_UNDER_RUN; + default: return 0; + } +} + +static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer, + struct mdp5_interface *intf) +{ + /* + * In case of DSI Command Mode, 
the Ping Pong's read pointer IRQ + * acts as a Vblank signal. The Ping Pong buffer used is bound to + * layer mixer. + */ + + if ((intf->type == INTF_DSI) && + (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) + return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp; + + if (intf->type == INTF_WB) + return MDP5_IRQ_WB_2_DONE; + + switch (intf->num) { + case 0: return MDP5_IRQ_INTF0_VSYNC; + case 1: return MDP5_IRQ_INTF1_VSYNC; + case 2: return MDP5_IRQ_INTF2_VSYNC; + case 3: return MDP5_IRQ_INTF3_VSYNC; + default: return 0; + } +} + +static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer) +{ + return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp; +} + +void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); +void mdp5_irq_preinstall(struct msm_kms *kms); +int mdp5_irq_postinstall(struct msm_kms *kms); +void mdp5_irq_uninstall(struct msm_kms *kms); +irqreturn_t mdp5_irq(struct msm_kms *kms); +int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); +void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); + +uint32_t mdp5_plane_get_flush(struct drm_plane *plane); +enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); +enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane); +struct drm_plane *mdp5_plane_init(struct drm_device *dev, + enum drm_plane_type type); + +struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); +uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); + +struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc); +struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc); +void mdp5_crtc_set_pipeline(struct drm_crtc *crtc); +void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); +struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, + struct drm_plane *plane, + struct drm_plane *cursor_plane, int id); + +struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, + struct mdp5_interface *intf, struct mdp5_ctl *ctl); +int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder); +void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode); +int mdp5_encoder_get_linecount(struct drm_encoder *encoder); +u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder); + +#ifdef CONFIG_DRM_MSM_DSI +void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void mdp5_cmd_encoder_disable(struct drm_encoder *encoder); +void mdp5_cmd_encoder_enable(struct drm_encoder *encoder); +int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder); +#else +static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} +static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) +{ +} +static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) +{ +} +static inline int mdp5_cmd_encoder_set_split_display( + struct drm_encoder *encoder, struct drm_encoder *slave_encoder) +{ + return -EINVAL; +} +#endif + +#endif /* __MDP5_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c new file mode 100644 index 000000000000..f2a0db7a8a03 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2016, The Linux Foundation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include + +#include "msm_drv.h" +#include "mdp5_kms.h" + +/* + * If needed, this can become more specific: something like struct mdp5_mdss, + * which contains a 'struct msm_mdss base' member. + */ +struct msm_mdss { + struct drm_device *dev; + + void __iomem *mmio, *vbif; + + struct regulator *vdd; + + struct clk *ahb_clk; + struct clk *axi_clk; + struct clk *vsync_clk; + + struct { + volatile unsigned long enabled_mask; + struct irq_domain *domain; + } irqcontroller; +}; + +static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data) +{ + msm_writel(data, mdss->mmio + reg); +} + +static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg) +{ + return msm_readl(mdss->mmio + reg); +} + +static irqreturn_t mdss_irq(int irq, void *arg) +{ + struct msm_mdss *mdss = arg; + u32 intr; + + intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS); + + VERB("intr=%08x", intr); + + while (intr) { + irq_hw_number_t hwirq = fls(intr) - 1; + + generic_handle_irq(irq_find_mapping( + mdss->irqcontroller.domain, hwirq)); + intr &= ~(1 << hwirq); + } + + return IRQ_HANDLED; +} + +/* + * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc) + * can register to get their irq's delivered + */ + +#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \ + MDSS_HW_INTR_STATUS_INTR_DSI0 | \ + MDSS_HW_INTR_STATUS_INTR_DSI1 | \ + MDSS_HW_INTR_STATUS_INTR_HDMI | \ + MDSS_HW_INTR_STATUS_INTR_EDP) + +static void mdss_hw_mask_irq(struct irq_data *irqd) +{ + struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd); + + smp_mb__before_atomic(); + clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask); + smp_mb__after_atomic(); +} + +static void mdss_hw_unmask_irq(struct irq_data *irqd) +{ + struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd); + + smp_mb__before_atomic(); + set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask); + smp_mb__after_atomic(); +} + +static struct irq_chip mdss_hw_irq_chip = { + .name = "mdss", + .irq_mask = mdss_hw_mask_irq, + .irq_unmask = mdss_hw_unmask_irq, +}; + +static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct msm_mdss *mdss = d->host_data; + + if (!(VALID_IRQS & (1 << hwirq))) + return -EPERM; + + irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq); + irq_set_chip_data(irq, mdss); + + return 0; +} + +static const struct irq_domain_ops mdss_hw_irqdomain_ops = { + .map = mdss_hw_irqdomain_map, + .xlate = irq_domain_xlate_onecell, +}; + + +static int mdss_irq_domain_init(struct msm_mdss *mdss) +{ + struct device *dev = mdss->dev->dev; + struct irq_domain *d; + + d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops, + mdss); + if (!d) { + dev_err(dev, "mdss irq domain add failed\n"); + return -ENXIO; + } + + mdss->irqcontroller.enabled_mask = 0; + mdss->irqcontroller.domain = d; + + return 0; +} + +int msm_mdss_enable(struct msm_mdss *mdss) +{ + DBG(""); + + 
clk_prepare_enable(mdss->ahb_clk); + if (mdss->axi_clk) + clk_prepare_enable(mdss->axi_clk); + if (mdss->vsync_clk) + clk_prepare_enable(mdss->vsync_clk); + + return 0; +} + +int msm_mdss_disable(struct msm_mdss *mdss) +{ + DBG(""); + + if (mdss->vsync_clk) + clk_disable_unprepare(mdss->vsync_clk); + if (mdss->axi_clk) + clk_disable_unprepare(mdss->axi_clk); + clk_disable_unprepare(mdss->ahb_clk); + + return 0; +} + +static int msm_mdss_get_clocks(struct msm_mdss *mdss) +{ + struct platform_device *pdev = to_platform_device(mdss->dev->dev); + + mdss->ahb_clk = msm_clk_get(pdev, "iface"); + if (IS_ERR(mdss->ahb_clk)) + mdss->ahb_clk = NULL; + + mdss->axi_clk = msm_clk_get(pdev, "bus"); + if (IS_ERR(mdss->axi_clk)) + mdss->axi_clk = NULL; + + mdss->vsync_clk = msm_clk_get(pdev, "vsync"); + if (IS_ERR(mdss->vsync_clk)) + mdss->vsync_clk = NULL; + + return 0; +} + +void msm_mdss_destroy(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_mdss *mdss = priv->mdss; + + if (!mdss) + return; + + irq_domain_remove(mdss->irqcontroller.domain); + mdss->irqcontroller.domain = NULL; + + regulator_disable(mdss->vdd); + + pm_runtime_disable(dev->dev); +} + +int msm_mdss_init(struct drm_device *dev) +{ + struct platform_device *pdev = to_platform_device(dev->dev); + struct msm_drm_private *priv = dev->dev_private; + struct msm_mdss *mdss; + int ret; + + DBG(""); + + if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss")) + return 0; + + mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL); + if (!mdss) { + ret = -ENOMEM; + goto fail; + } + + mdss->dev = dev; + + mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS"); + if (IS_ERR(mdss->mmio)) { + ret = PTR_ERR(mdss->mmio); + goto fail; + } + + mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); + if (IS_ERR(mdss->vbif)) { + ret = PTR_ERR(mdss->vbif); + goto fail; + } + + ret = msm_mdss_get_clocks(mdss); + if (ret) { + dev_err(dev->dev, "failed to get clocks: %d\n", ret); + goto fail; + } + + /* Regulator to enable GDSCs in downstream kernels */ + mdss->vdd = devm_regulator_get(dev->dev, "vdd"); + if (IS_ERR(mdss->vdd)) { + ret = PTR_ERR(mdss->vdd); + goto fail; + } + + ret = regulator_enable(mdss->vdd); + if (ret) { + dev_err(dev->dev, "failed to enable regulator vdd: %d\n", + ret); + goto fail; + } + + ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0), + mdss_irq, 0, "mdss_isr", mdss); + if (ret) { + dev_err(dev->dev, "failed to init irq: %d\n", ret); + goto fail_irq; + } + + ret = mdss_irq_domain_init(mdss); + if (ret) { + dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret); + goto fail_irq; + } + + priv->mdss = mdss; + + pm_runtime_enable(dev->dev); + + return 0; +fail_irq: + regulator_disable(mdss->vdd); +fail: + return ret; +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c new file mode 100644 index 000000000000..8a00991f03c7 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
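Stepping back to mdss_irq() above: the fls()-based loop dispatches the highest pending sub-block interrupt first. A small worked example with a made-up status value: for intr = 0x12, fls(0x12) - 1 = 4, so hwirq 4 is handled and bit 4 cleared, leaving 0x02; the next pass handles hwirq 1, and the loop ends once intr reaches zero.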
+ * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "mdp5_kms.h" + +/* + * As of now, there are only 2 combinations possible for source split: + * + * Left | Right + * -----|------ + * LM0 | LM1 + * LM2 | LM5 + * + */ +static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 }; + +static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm) +{ + int i; + int pair_lm; + + pair_lm = lm_right_pair[lm]; + if (pair_lm < 0) + return -EINVAL; + + for (i = 0; i < mdp5_kms->num_hwmixers; i++) { + struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i]; + + if (mixer->lm == pair_lm) + return mixer->idx; + } + + return -1; +} + +int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, + uint32_t caps, struct mdp5_hw_mixer **mixer, + struct mdp5_hw_mixer **r_mixer) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_state *state = mdp5_get_state(s); + struct mdp5_hw_mixer_state *new_state; + int i; + + if (IS_ERR(state)) + return PTR_ERR(state); + + new_state = &state->hwmixer; + + for (i = 0; i < mdp5_kms->num_hwmixers; i++) { + struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i]; + + /* + * skip if already in-use by a different CRTC. If there is a + * mixer already assigned to this CRTC, it means this call is + * a request to get an additional right mixer. Assume that the + * existing mixer is the 'left' one, and try to see if we can + * get its corresponding 'right' pair. + */ + if (new_state->hwmixer_to_crtc[cur->idx] && + new_state->hwmixer_to_crtc[cur->idx] != crtc) + continue; + + /* skip if doesn't support some required caps: */ + if (caps & ~cur->caps) + continue; + + if (r_mixer) { + int pair_idx; + + pair_idx = get_right_pair_idx(mdp5_kms, cur->lm); + if (pair_idx < 0) + return -EINVAL; + + if (new_state->hwmixer_to_crtc[pair_idx]) + continue; + + *r_mixer = mdp5_kms->hwmixers[pair_idx]; + } + + /* + * prefer a pair-able LM over an unpairable one. We can + * switch the CRTC from Normal mode to Source Split mode + * without requiring a full modeset if we had already + * assigned this CRTC a pair-able LM. + * + * TODO: There will be assignment sequences which would + * result in the CRTC requiring a full modeset, even + * if we have the LM resources to prevent it. For a platform + * with a few displays, we don't run out of pair-able LMs + * so easily. For now, ignore the possibility of requiring + * a full modeset. 
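	 *
	 * Worked illustration (added here, not in the original comment):
	 * with the lm_right_pair[] table above, only LM0 and LM2 can act
	 * as the left half of a pair (their right partners being LM1 and
	 * LM5); LM1, LM3, LM4 and LM5 map to -1, so picking one of them
	 * as *mixer now would force a full modeset later if the CRTC ever
	 * needs Source Split.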
+ */ + if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR) + *mixer = cur; + } + + if (!(*mixer)) + return -ENOMEM; + + if (r_mixer && !(*r_mixer)) + return -ENOMEM; + + DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name); + + new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc; + if (r_mixer) { + DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm, + crtc->name); + new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc; + } + + return 0; +} + +void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer) +{ + struct mdp5_state *state = mdp5_get_state(s); + struct mdp5_hw_mixer_state *new_state = &state->hwmixer; + + if (!mixer) + return; + + if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx])) + return; + + DBG("%s: release from crtc %s", mixer->name, + new_state->hwmixer_to_crtc[mixer->idx]->name); + + new_state->hwmixer_to_crtc[mixer->idx] = NULL; +} + +void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer) +{ + kfree(mixer); +} + +static const char * const mixer_names[] = { + "LM0", "LM1", "LM2", "LM3", "LM4", "LM5", +}; + +struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm) +{ + struct mdp5_hw_mixer *mixer; + + mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); + if (!mixer) + return ERR_PTR(-ENOMEM); + + mixer->name = mixer_names[lm->id]; + mixer->lm = lm->id; + mixer->caps = lm->caps; + mixer->pp = lm->pp; + mixer->dspp = lm->dspp; + mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id); + + return mixer; +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h new file mode 100644 index 000000000000..9be94f567fbd --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
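To show how the mdp5_mixer_assign() interface defined above is meant to be driven, here is a hypothetical caller sketch; the real caller lives in the CRTC atomic-check code, which is outside this excerpt, and MDP_LM_CAP_DISPLAY is assumed to be the relevant capability bit:

	struct mdp5_hw_mixer *mixer = NULL;	/* must start out NULL */
	int ret;

	/* single mixer for a normal CRTC; pass a second NULL-initialized
	 * pointer instead of NULL to request a Source Split pair.  's' is
	 * the caller's drm_atomic_state. */
	ret = mdp5_mixer_assign(s, crtc, MDP_LM_CAP_DISPLAY, &mixer, NULL);
	if (ret)
		return ret;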
+ */ + +#ifndef __MDP5_LM_H__ +#define __MDP5_LM_H__ + +/* represents a hw Layer Mixer, one (or more) is dynamically assigned to a crtc */ +struct mdp5_hw_mixer { + int idx; + + const char *name; + + int lm; /* the LM instance # */ + uint32_t caps; + int pp; + int dspp; + + uint32_t flush_mask; /* used to commit LM registers */ +}; + +/* global atomic state of assignment between CRTCs and Layer Mixers: */ +struct mdp5_hw_mixer_state { + struct drm_crtc *hwmixer_to_crtc[8]; +}; + +struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm); +void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm); +int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, + uint32_t caps, struct mdp5_hw_mixer **mixer, + struct mdp5_hw_mixer **r_mixer); +void mdp5_mixer_release(struct drm_atomic_state *s, + struct mdp5_hw_mixer *mixer); + +#endif /* __MDP5_LM_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c new file mode 100644 index 000000000000..ff52c49095f9 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "mdp5_kms.h" + +int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, + uint32_t caps, uint32_t blkcfg, + struct mdp5_hw_pipe **hwpipe, + struct mdp5_hw_pipe **r_hwpipe) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_state *state; + struct mdp5_hw_pipe_state *old_state, *new_state; + int i, j; + + state = mdp5_get_state(s); + if (IS_ERR(state)) + return PTR_ERR(state); + + /* grab old_state after mdp5_get_state(), since now we hold lock: */ + old_state = &mdp5_kms->state->hwpipe; + new_state = &state->hwpipe; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i]; + + /* skip if already in-use.. 
check both new and old state, + * since we cannot immediately re-use a pipe that is + * released in the current update in some cases: + * (1) mdp5 can have SMP (non-double-buffered) + * (2) hw pipe previously assigned to different CRTC + * (vblanks might not be aligned) + */ + if (new_state->hwpipe_to_plane[cur->idx] || + old_state->hwpipe_to_plane[cur->idx]) + continue; + + /* skip if doesn't support some required caps: */ + if (caps & ~cur->caps) + continue; + + /* + * don't assign a cursor pipe to a plane that isn't going to + * be used as a cursor + */ + if (cur->caps & MDP_PIPE_CAP_CURSOR && + plane->type != DRM_PLANE_TYPE_CURSOR) + continue; + + /* possible candidate, take the one with the + * fewest unneeded caps bits set: + */ + if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) < + hweight_long((*hwpipe)->caps & ~caps))) { + bool r_found = false; + + if (r_hwpipe) { + for (j = i + 1; j < mdp5_kms->num_hwpipes; + j++) { + struct mdp5_hw_pipe *r_cur = + mdp5_kms->hwpipes[j]; + + /* reject different types of hwpipes */ + if (r_cur->caps != cur->caps) + continue; + + /* respect priority, eg. VIG0 > VIG1 */ + if (cur->pipe > r_cur->pipe) + continue; + + *r_hwpipe = r_cur; + r_found = true; + break; + } + } + + if (!r_hwpipe || r_found) + *hwpipe = cur; + } + } + + if (!(*hwpipe)) + return -ENOMEM; + + if (r_hwpipe && !(*r_hwpipe)) + return -ENOMEM; + + if (mdp5_kms->smp) { + int ret; + + /* We don't support SMP and 2 hwpipes/plane together */ + WARN_ON(r_hwpipe); + + DBG("%s: alloc SMP blocks", (*hwpipe)->name); + ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp, + (*hwpipe)->pipe, blkcfg); + if (ret) + return -ENOMEM; + + (*hwpipe)->blkcfg = blkcfg; + } + + DBG("%s: assign to plane %s for caps %x", + (*hwpipe)->name, plane->name, caps); + new_state->hwpipe_to_plane[(*hwpipe)->idx] = plane; + + if (r_hwpipe) { + DBG("%s: assign to right of plane %s for caps %x", + (*r_hwpipe)->name, plane->name, caps); + new_state->hwpipe_to_plane[(*r_hwpipe)->idx] = plane; + } + + return 0; +} + +void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_state *state = mdp5_get_state(s); + struct mdp5_hw_pipe_state *new_state = &state->hwpipe; + + if (!hwpipe) + return; + + if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx])) + return; + + DBG("%s: release from plane %s", hwpipe->name, + new_state->hwpipe_to_plane[hwpipe->idx]->name); + + if (mdp5_kms->smp) { + DBG("%s: free SMP blocks", hwpipe->name); + mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe); + } + + new_state->hwpipe_to_plane[hwpipe->idx] = NULL; +} + +void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe) +{ + kfree(hwpipe); +} + +struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, + uint32_t reg_offset, uint32_t caps) +{ + struct mdp5_hw_pipe *hwpipe; + + hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL); + if (!hwpipe) + return ERR_PTR(-ENOMEM); + + hwpipe->name = pipe2name(pipe); + hwpipe->pipe = pipe; + hwpipe->reg_offset = reg_offset; + hwpipe->caps = caps; + hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe); + + return hwpipe; +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h new file mode 100644 index 000000000000..bb2b0ac7aa2b --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark + * + * This program is free software; you can 
redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __MDP5_PIPE_H__ +#define __MDP5_PIPE_H__ + +/* TODO: Add SSPP_MAX in mdp5.xml.h */ +#define SSPP_MAX (SSPP_CURSOR1 + 1) + +/* represents a hw pipe, which is dynamically assigned to a plane */ +struct mdp5_hw_pipe { + int idx; + + const char *name; + enum mdp5_pipe pipe; + + uint32_t reg_offset; + uint32_t caps; + + uint32_t flush_mask; /* used to commit pipe registers */ + + /* number of smp blocks per plane, ie: + * nblks_y | (nblks_u << 8) | (nblks_v << 16) + */ + uint32_t blkcfg; +}; + +/* global atomic state of assignment between pipes and planes: */ +struct mdp5_hw_pipe_state { + struct drm_plane *hwpipe_to_plane[SSPP_MAX]; +}; + +int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, + uint32_t caps, uint32_t blkcfg, + struct mdp5_hw_pipe **hwpipe, + struct mdp5_hw_pipe **r_hwpipe); +void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe); + +struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, + uint32_t reg_offset, uint32_t caps); +void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe); + +#endif /* __MDP5_PIPE_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c new file mode 100644 index 000000000000..98d4d7331767 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -0,0 +1,1137 @@ +/* + * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
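As a small illustration of the blkcfg encoding described in struct mdp5_hw_pipe above (the block counts here are made up, not taken from any real SMP configuration):

	/* 6 SMP blocks for Y, 3 each for U and V */
	uint32_t blkcfg = 6 | (3 << 8) | (3 << 16);	/* == 0x00030306 */

	/* unpacking follows the same layout */
	uint32_t nblks_y = blkcfg & 0xff;
	uint32_t nblks_u = (blkcfg >> 8) & 0xff;
	uint32_t nblks_v = (blkcfg >> 16) & 0xff;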
+ */ + +#include +#include "mdp5_kms.h" + +struct mdp5_plane { + struct drm_plane base; + + uint32_t nformats; + uint32_t formats[32]; +}; +#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) + +static int mdp5_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_rect *src, struct drm_rect *dest); + +static struct mdp5_kms *get_kms(struct drm_plane *plane) +{ + struct msm_drm_private *priv = plane->dev->dev_private; + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +static bool plane_enabled(struct drm_plane_state *state) +{ + return state->visible; +} + +static void mdp5_plane_destroy(struct drm_plane *plane) +{ + struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + + drm_plane_helper_disable(plane); + drm_plane_cleanup(plane); + + kfree(mdp5_plane); +} + +static void mdp5_plane_install_rotation_property(struct drm_device *dev, + struct drm_plane *plane) +{ + drm_plane_create_rotation_property(plane, + DRM_MODE_ROTATE_0, + DRM_MODE_ROTATE_0 | + DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_X | + DRM_MODE_REFLECT_Y); +} + +/* helper to install properties which are common to planes and crtcs */ +static void mdp5_plane_install_properties(struct drm_plane *plane, + struct drm_mode_object *obj) +{ + struct drm_device *dev = plane->dev; + struct msm_drm_private *dev_priv = dev->dev_private; + struct drm_property *prop; + +#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \ + prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \ + if (!prop) { \ + prop = drm_property_##fnc(dev, 0, #name, \ + ##__VA_ARGS__); \ + if (!prop) { \ + dev_warn(dev->dev, \ + "Create property %s failed\n", \ + #name); \ + return; \ + } \ + dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \ + } \ + drm_object_attach_property(&plane->base, prop, init_val); \ + } while (0) + +#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \ + INSTALL_PROPERTY(name, NAME, init_val, \ + create_range, min, max) + +#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \ + INSTALL_PROPERTY(name, NAME, init_val, \ + create_enum, name##_prop_enum_list, \ + ARRAY_SIZE(name##_prop_enum_list)) + + INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1); + + mdp5_plane_install_rotation_property(dev, plane); + +#undef INSTALL_RANGE_PROPERTY +#undef INSTALL_ENUM_PROPERTY +#undef INSTALL_PROPERTY +} + +static int mdp5_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = plane->dev; + struct mdp5_plane_state *pstate; + struct msm_drm_private *dev_priv = dev->dev_private; + int ret = 0; + + pstate = to_mdp5_plane_state(state); + +#define SET_PROPERTY(name, NAME, type) do { \ + if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \ + pstate->name = (type)val; \ + DBG("Set property %s %d", #name, (type)val); \ + goto done; \ + } \ + } while (0) + + SET_PROPERTY(zpos, ZPOS, uint8_t); + + dev_err(dev->dev, "Invalid property\n"); + ret = -EINVAL; +done: + return ret; +#undef SET_PROPERTY +} + +static int mdp5_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, uint64_t *val) +{ + struct drm_device *dev = plane->dev; + struct mdp5_plane_state *pstate; + struct msm_drm_private *dev_priv = dev->dev_private; + int ret = 0; + + pstate = to_mdp5_plane_state(state); + +#define GET_PROPERTY(name, NAME, type) do { \ + if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \ + *val = 
pstate->name; \
+		DBG("Get property %s %lld", #name, *val); \
+		goto done; \
+	} \
+	} while (0)
+
+	GET_PROPERTY(zpos, ZPOS, uint8_t);
+
+	dev_err(dev->dev, "Invalid property\n");
+	ret = -EINVAL;
+done:
+	return ret;
+#undef GET_PROPERTY
+}
+
+static void
+mdp5_plane_atomic_print_state(struct drm_printer *p,
+		const struct drm_plane_state *state)
+{
+	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+	struct mdp5_kms *mdp5_kms = get_kms(state->plane);
+
+	drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
+			pstate->hwpipe->name : "(null)");
+	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+		drm_printf(p, "\tright-hwpipe=%s\n",
+			   pstate->r_hwpipe ? pstate->r_hwpipe->name :
+					      "(null)");
+	drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
+	drm_printf(p, "\tzpos=%u\n", pstate->zpos);
+	drm_printf(p, "\talpha=%u\n", pstate->alpha);
+	drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
+}
+
+static void mdp5_plane_reset(struct drm_plane *plane)
+{
+	struct mdp5_plane_state *mdp5_state;
+
+	if (plane->state && plane->state->fb)
+		drm_framebuffer_unreference(plane->state->fb);
+
+	kfree(to_mdp5_plane_state(plane->state));
+	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+
+	/* assign default blend parameters */
+	mdp5_state->alpha = 255;
+	mdp5_state->premultiplied = 0;
+
+	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+		mdp5_state->zpos = STAGE_BASE;
+	else
+		mdp5_state->zpos = STAGE0 + drm_plane_index(plane);
+
+	mdp5_state->base.plane = plane;
+
+	plane->state = &mdp5_state->base;
+}
+
+static struct drm_plane_state *
+mdp5_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct mdp5_plane_state *mdp5_state;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+			     sizeof(*mdp5_state), GFP_KERNEL);
+	if (!mdp5_state)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
+
+	return &mdp5_state->base;
+}
+
+static void mdp5_plane_destroy_state(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
+	if (state->fb)
+		drm_framebuffer_unreference(state->fb);
+
+	kfree(pstate);
+}
+
+static const struct drm_plane_funcs mdp5_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = mdp5_plane_destroy,
+	.atomic_set_property = mdp5_plane_atomic_set_property,
+	.atomic_get_property = mdp5_plane_atomic_get_property,
+	.reset = mdp5_plane_reset,
+	.atomic_duplicate_state = mdp5_plane_duplicate_state,
+	.atomic_destroy_state = mdp5_plane_destroy_state,
+	.atomic_print_state = mdp5_plane_atomic_print_state,
+};
+
+static int mdp5_plane_prepare_fb(struct drm_plane *plane,
+				 struct drm_plane_state *new_state)
+{
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	struct msm_kms *kms = &mdp5_kms->base.base;
+	struct drm_framebuffer *fb = new_state->fb;
+
+	if (!new_state->fb)
+		return 0;
+
+	DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
+	return msm_framebuffer_prepare(fb, kms->aspace);
+}
+
+static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
+				  struct drm_plane_state *old_state)
+{
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	struct msm_kms *kms = &mdp5_kms->base.base;
+	struct drm_framebuffer *fb = old_state->fb;
+
+	if (!fb)
+		return;
+
+	DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
+	msm_framebuffer_cleanup(fb, kms->aspace);
+}
+
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+static int
mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
+		struct drm_plane_state *state)
+{
+	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
+	struct drm_plane *plane = state->plane;
+	struct drm_plane_state *old_state = plane->state;
+	struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
+	bool new_hwpipe = false;
+	bool need_right_hwpipe = false;
+	uint32_t max_width, max_height;
+	bool out_of_bounds = false;
+	uint32_t caps = 0;
+	struct drm_rect clip = {};
+	int min_scale, max_scale;
+	int ret;
+
+	DBG("%s: check (%d -> %d)", plane->name,
+			plane_enabled(old_state), plane_enabled(state));
+
+	max_width = config->hw->lm.max_width << 16;
+	max_height = config->hw->lm.max_height << 16;
+
+	/* Make sure source dimensions are within bounds. */
+	if (state->src_h > max_height)
+		out_of_bounds = true;
+
+	if (state->src_w > max_width) {
+		/* If source split is supported, we can go up to 2x
+		 * the max LM width, but we'd need to stage another
+		 * hwpipe to the right LM. So, the drm_plane would
+		 * consist of 2 hwpipes.
+		 */
+		if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
+		    (state->src_w <= 2 * max_width))
+			need_right_hwpipe = true;
+		else
+			out_of_bounds = true;
+	}
+
+	if (out_of_bounds) {
+		struct drm_rect src = drm_plane_state_src(state);
+		DBG("Invalid source size "DRM_RECT_FP_FMT,
+				DRM_RECT_FP_ARG(&src));
+		return -ERANGE;
+	}
+
+	min_scale = FRAC_16_16(1, 8);
+	max_scale = FRAC_16_16(8, 1);
+
+	if (crtc_state->enable)
+		drm_mode_get_hv_timing(&crtc_state->mode,
+				       &clip.x2, &clip.y2);
+
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state, &clip,
+						  min_scale, max_scale,
+						  true, true);
+	if (ret)
+		return ret;
+
+	if (plane_enabled(state)) {
+		unsigned int rotation;
+		const struct mdp_format *format;
+		struct mdp5_kms *mdp5_kms = get_kms(plane);
+		uint32_t blkcfg = 0;
+
+		format = to_mdp_format(msm_framebuffer_format(state->fb));
+		if (MDP_FORMAT_IS_YUV(format))
+			caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+
+		if (((state->src_w >> 16) != state->crtc_w) ||
+		    ((state->src_h >> 16) != state->crtc_h))
+			caps |= MDP_PIPE_CAP_SCALE;
+
+		rotation = drm_rotation_simplify(state->rotation,
+						 DRM_MODE_ROTATE_0 |
+						 DRM_MODE_REFLECT_X |
+						 DRM_MODE_REFLECT_Y);
+
+		if (rotation & DRM_MODE_REFLECT_X)
+			caps |= MDP_PIPE_CAP_HFLIP;
+
+		if (rotation & DRM_MODE_REFLECT_Y)
+			caps |= MDP_PIPE_CAP_VFLIP;
+
+		if (plane->type == DRM_PLANE_TYPE_CURSOR)
+			caps |= MDP_PIPE_CAP_CURSOR;
+
+		/* (re)allocate hw pipe if we don't have one or caps-mismatch: */
+		if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
+			new_hwpipe = true;
+
+		/*
+		 * (re)allocate hw pipe if we're either requesting 2 hw pipes
+		 * or we're switching from 2 hw pipes to 1 hw pipe because the
+		 * new src_w can be supported by 1 hw pipe itself.
+		 */
+		if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
+		    (!need_right_hwpipe && mdp5_state->r_hwpipe))
+			new_hwpipe = true;
+
+		if (mdp5_kms->smp) {
+			const struct mdp_format *format =
+				to_mdp_format(msm_framebuffer_format(state->fb));
+
+			blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
+						    state->src_w >> 16, false);
+
+			if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
+				new_hwpipe = true;
+		}
+
+		/* (re)assign hwpipe if needed, otherwise keep old one: */
+		if (new_hwpipe) {
+			/* TODO maybe we want to re-assign hwpipe sometimes
+			 * in cases when we no longer need some caps to make
+			 * it available for other planes?
+			 */
+			struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
+			struct mdp5_hw_pipe *old_right_hwpipe =
+						mdp5_state->r_hwpipe;
+			struct mdp5_hw_pipe *new_hwpipe = NULL;
+			struct mdp5_hw_pipe *new_right_hwpipe = NULL;
+
+			ret = mdp5_pipe_assign(state->state, plane, caps,
+					       blkcfg, &new_hwpipe,
+					       need_right_hwpipe ?
+					       &new_right_hwpipe : NULL);
+			if (ret) {
+				DBG("%s: failed to assign hwpipe(s)!",
+				    plane->name);
+				return ret;
+			}
+
+			mdp5_state->hwpipe = new_hwpipe;
+			if (need_right_hwpipe)
+				mdp5_state->r_hwpipe = new_right_hwpipe;
+			else
+				/*
+				 * set it to NULL so that the driver knows we
+				 * don't have a right hwpipe when committing a
+				 * new state
+				 */
+				mdp5_state->r_hwpipe = NULL;
+
+
+			mdp5_pipe_release(state->state, old_hwpipe);
+			mdp5_pipe_release(state->state, old_right_hwpipe);
+		}
+	} else {
+		mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+		mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+		mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
+	}
+
+	return 0;
+}
+
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+				   struct drm_plane_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+
+	crtc = state->crtc ? state->crtc : plane->state->crtc;
+	if (!crtc)
+		return 0;
+
+	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+	if (WARN_ON(!crtc_state))
+		return -EINVAL;
+
+	return mdp5_plane_atomic_check_with_state(crtc_state, state);
+}
+
+static void mdp5_plane_atomic_update(struct drm_plane *plane,
+				     struct drm_plane_state *old_state)
+{
+	struct drm_plane_state *state = plane->state;
+
+	DBG("%s: update", plane->name);
+
+	if (plane_enabled(state)) {
+		int ret;
+
+		ret = mdp5_plane_mode_set(plane,
+				state->crtc, state->fb,
+				&state->src, &state->dst);
+		/* atomic_check should have ensured that this doesn't fail */
+		WARN_ON(ret < 0);
+	}
+}
+
+static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
+					 struct drm_plane_state *state)
+{
+	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
+	struct drm_crtc_state *crtc_state;
+	struct drm_rect clip = {};
+	int min_scale, max_scale;
+	int ret;
+
+	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+							state->crtc);
+	if (WARN_ON(!crtc_state))
+		return -EINVAL;
+
+	if (!crtc_state->active)
+		return -EINVAL;
+
+	mdp5_state = to_mdp5_plane_state(state);
+
+	/* don't use fast path if we don't have a hwpipe allocated yet */
+	if (!mdp5_state->hwpipe)
+		return -EINVAL;
+
+	/* only allow changing of position (crtc x/y or src x/y) in fast path */
+	if (plane->state->crtc != state->crtc ||
+	    plane->state->src_w != state->src_w ||
+	    plane->state->src_h != state->src_h ||
+	    plane->state->crtc_w != state->crtc_w ||
+	    plane->state->crtc_h != state->crtc_h ||
+	    !plane->state->fb ||
+	    plane->state->fb != state->fb)
+		return -EINVAL;
+
+	min_scale = FRAC_16_16(1, 8);
+	max_scale = FRAC_16_16(8, 1);
+
+	if (crtc_state->enable)
+		drm_mode_get_hv_timing(&crtc_state->mode,
+				       &clip.x2, &clip.y2);
+
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state, &clip,
+						  min_scale, max_scale,
+						  true, true);
+	if (ret)
+		return ret;
+
+	/*
+	 * if the visibility of the plane changes (i.e., if the cursor is
+	 * clipped out completely), we can't take the async path because
+	 * we need to stage/unstage the plane from the Layer Mixer(s). We
+	 * also assign/unassign the hwpipe(s) tied to the plane. We avoid
+	 * taking the fast path for both these reasons.
+ */ + if (state->visible != plane->state->visible) + return -EINVAL; + + return 0; +} + +static void mdp5_plane_atomic_async_update(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + + if (plane_enabled(new_state)) { + struct mdp5_ctl *ctl; + struct mdp5_pipeline *pipeline = + mdp5_crtc_get_pipeline(plane->crtc); + int ret; + + ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb, + &new_state->src, &new_state->dst); + WARN_ON(ret < 0); + + ctl = mdp5_crtc_get_ctl(new_state->crtc); + + mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane)); + } + + *to_mdp5_plane_state(plane->state) = + *to_mdp5_plane_state(new_state); +} + +static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { + .prepare_fb = mdp5_plane_prepare_fb, + .cleanup_fb = mdp5_plane_cleanup_fb, + .atomic_check = mdp5_plane_atomic_check, + .atomic_update = mdp5_plane_atomic_update, + .atomic_async_check = mdp5_plane_atomic_async_check, + .atomic_async_update = mdp5_plane_atomic_async_update, +}; + +static void set_scanout_locked(struct mdp5_kms *mdp5_kms, + enum mdp5_pipe pipe, + struct drm_framebuffer *fb) +{ + struct msm_kms *kms = &mdp5_kms->base.base; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), + MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | + MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe), + MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) | + MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), + msm_framebuffer_iova(fb, kms->aspace, 0)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), + msm_framebuffer_iova(fb, kms->aspace, 1)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), + msm_framebuffer_iova(fb, kms->aspace, 2)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), + msm_framebuffer_iova(fb, kms->aspace, 3)); +} + +/* Note: mdp5_plane->pipe_lock must be locked */ +static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe) +{ + uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) & + ~MDP5_PIPE_OP_MODE_CSC_1_EN; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value); +} + +/* Note: mdp5_plane->pipe_lock must be locked */ +static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, + struct csc_cfg *csc) +{ + uint32_t i, mode = 0; /* RGB, no CSC */ + uint32_t *matrix; + + if (unlikely(!csc)) + return; + + if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type)) + mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV); + if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type)) + mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV); + mode |= MDP5_PIPE_OP_MODE_CSC_1_EN; + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode); + + matrix = csc->matrix; + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5])); + mdp5_write(mdp5_kms, 
REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8])); + + for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) { + uint32_t *pre_clamp = csc->pre_clamp; + uint32_t *post_clamp = csc->post_clamp; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i), + MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) | + MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i), + MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) | + MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i), + MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i), + MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i])); + } +} + +#define PHASE_STEP_SHIFT 21 +#define DOWN_SCALE_RATIO_MAX 32 /* 2^(26-21) */ + +static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase) +{ + uint32_t unit; + + if (src == 0 || dst == 0) + return -EINVAL; + + /* + * PHASE_STEP_X/Y is coded on 26 bits (25:0), + * where 2^21 represents the unity "1" in fixed-point hardware design. + * This leaves 5 bits for the integer part (downscale case): + * -> maximum downscale ratio = 0b1_1111 = 31 + */ + if (src > (dst * DOWN_SCALE_RATIO_MAX)) + return -EOVERFLOW; + + unit = 1 << PHASE_STEP_SHIFT; + *out_phase = mult_frac(unit, src, dst); + + return 0; +} + +static int calc_scalex_steps(struct drm_plane *plane, + uint32_t pixel_format, uint32_t src, uint32_t dest, + uint32_t phasex_steps[COMP_MAX]) +{ + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct device *dev = mdp5_kms->dev->dev; + uint32_t phasex_step; + unsigned int hsub; + int ret; + + ret = calc_phase_step(src, dest, &phasex_step); + if (ret) { + dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret); + return ret; + } + + hsub = drm_format_horz_chroma_subsampling(pixel_format); + + phasex_steps[COMP_0] = phasex_step; + phasex_steps[COMP_3] = phasex_step; + phasex_steps[COMP_1_2] = phasex_step / hsub; + + return 0; +} + +static int calc_scaley_steps(struct drm_plane *plane, + uint32_t pixel_format, uint32_t src, uint32_t dest, + uint32_t phasey_steps[COMP_MAX]) +{ + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct device *dev = mdp5_kms->dev->dev; + uint32_t phasey_step; + unsigned int vsub; + int ret; + + ret = calc_phase_step(src, dest, &phasey_step); + if (ret) { + dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret); + return ret; + } + + vsub = drm_format_vert_chroma_subsampling(pixel_format); + + phasey_steps[COMP_0] = phasey_step; + phasey_steps[COMP_3] = phasey_step; + phasey_steps[COMP_1_2] = phasey_step / vsub; + + return 0; +} + +static uint32_t get_scale_config(const struct mdp_format *format, + uint32_t src, uint32_t dst, bool horz) +{ + bool scaling = format->is_yuv ? true : (src != dst); + uint32_t sub, pix_fmt = format->base.pixel_format; + uint32_t ya_filter, uv_filter; + bool yuv = format->is_yuv; + + if (!scaling) + return 0; + + if (yuv) { + sub = horz ? drm_format_horz_chroma_subsampling(pix_fmt) : + drm_format_vert_chroma_subsampling(pix_fmt); + uv_filter = ((src / sub) <= dst) ? + SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + } + ya_filter = (src <= dst) ? 
SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + + if (horz) + return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) | + COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter)); + else + return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) | + COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter)); +} + +static void calc_pixel_ext(const struct mdp_format *format, + uint32_t src, uint32_t dst, uint32_t phase_step[2], + int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX], + bool horz) +{ + bool scaling = format->is_yuv ? true : (src != dst); + int i; + + /* + * Note: + * We assume here that: + * 1. PCMN filter is used for downscale + * 2. bilinear filter is used for upscale + * 3. we are in a single pipe configuration + */ + + for (i = 0; i < COMP_MAX; i++) { + pix_ext_edge1[i] = 0; + pix_ext_edge2[i] = scaling ? 1 : 0; + } +} + +static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, + const struct mdp_format *format, + uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX], + uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX]) +{ + uint32_t pix_fmt = format->base.pixel_format; + uint32_t lr, tb, req; + int i; + + for (i = 0; i < COMP_MAX; i++) { + uint32_t roi_w = src_w; + uint32_t roi_h = src_h; + + if (format->is_yuv && i == COMP_1_2) { + roi_w /= drm_format_horz_chroma_subsampling(pix_fmt); + roi_h /= drm_format_vert_chroma_subsampling(pix_fmt); + } + + lr = (pe_left[i] >= 0) ? + MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) : + MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]); + + lr |= (pe_right[i] >= 0) ? + MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) : + MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]); + + tb = (pe_top[i] >= 0) ? + MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) : + MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]); + + tb |= (pe_bottom[i] >= 0) ? 
+ MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) : + MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]); + + req = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w + + pe_left[i] + pe_right[i]); + + req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h + + pe_top[i] + pe_bottom[i]); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req); + + DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i, + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT), + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT), + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF), + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF), + FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT)); + + DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i, + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT), + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT), + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF), + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF), + FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM)); + } +} + +struct pixel_ext { + int left[COMP_MAX]; + int right[COMP_MAX]; + int top[COMP_MAX]; + int bottom[COMP_MAX]; +}; + +struct phase_step { + u32 x[COMP_MAX]; + u32 y[COMP_MAX]; +}; + +static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms, + struct mdp5_hw_pipe *hwpipe, + struct drm_framebuffer *fb, + struct phase_step *step, + struct pixel_ext *pe, + u32 scale_config, u32 hdecm, u32 vdecm, + bool hflip, bool vflip, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + u32 src_img_w, u32 src_img_h, + u32 src_x, u32 src_y, + u32 src_w, u32 src_h) +{ + enum mdp5_pipe pipe = hwpipe->pipe; + bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT; + const struct mdp_format *format = + to_mdp_format(msm_framebuffer_format(fb)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), + MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) | + MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe), + MDP5_PIPE_SRC_SIZE_WIDTH(src_w) | + MDP5_PIPE_SRC_SIZE_HEIGHT(src_h)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe), + MDP5_PIPE_SRC_XY_X(src_x) | + MDP5_PIPE_SRC_XY_Y(src_y)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe), + MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) | + MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe), + MDP5_PIPE_OUT_XY_X(crtc_x) | + MDP5_PIPE_OUT_XY_Y(crtc_y)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), + MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | + MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | + MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | + MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | + COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) | + MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | + MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | + COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | + MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) | + MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), + MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | + MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | + MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | + MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), + (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) | + (vflip ? 
MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) | + COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) | + MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); + + /* not using secure mode: */ + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); + + if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) + mdp5_write_pixel_ext(mdp5_kms, pipe, format, + src_w, pe->left, pe->right, + src_h, pe->top, pe->bottom); + + if (hwpipe->caps & MDP_PIPE_CAP_SCALE) { + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), + step->x[COMP_0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), + step->y[COMP_0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), + step->x[COMP_1_2]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), + step->y[COMP_1_2]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), + MDP5_PIPE_DECIMATION_VERT(vdecm) | + MDP5_PIPE_DECIMATION_HORZ(hdecm)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), + scale_config); + } + + if (hwpipe->caps & MDP_PIPE_CAP_CSC) { + if (MDP_FORMAT_IS_YUV(format)) + csc_enable(mdp5_kms, pipe, + mdp_get_default_csc_cfg(CSC_YUV2RGB)); + else + csc_disable(mdp5_kms, pipe); + } + + set_scanout_locked(mdp5_kms, pipe, fb); +} + +static int mdp5_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_rect *src, struct drm_rect *dest) +{ + struct drm_plane_state *pstate = plane->state; + struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; + struct mdp5_kms *mdp5_kms = get_kms(plane); + enum mdp5_pipe pipe = hwpipe->pipe; + struct mdp5_hw_pipe *right_hwpipe; + const struct mdp_format *format; + uint32_t nplanes, config = 0; + struct phase_step step = { { 0 } }; + struct pixel_ext pe = { { 0 } }; + uint32_t hdecm = 0, vdecm = 0; + uint32_t pix_format; + unsigned int rotation; + bool vflip, hflip; + int crtc_x, crtc_y; + unsigned int crtc_w, crtc_h; + uint32_t src_x, src_y; + uint32_t src_w, src_h; + uint32_t src_img_w, src_img_h; + int ret; + + nplanes = fb->format->num_planes; + + /* bad formats should already be rejected: */ + if (WARN_ON(nplanes > pipe2nclients(pipe))) + return -EINVAL; + + format = to_mdp_format(msm_framebuffer_format(fb)); + pix_format = format->base.pixel_format; + + src_x = src->x1; + src_y = src->y1; + src_w = drm_rect_width(src); + src_h = drm_rect_height(src); + + crtc_x = dest->x1; + crtc_y = dest->y1; + crtc_w = drm_rect_width(dest); + crtc_h = drm_rect_height(dest); + + /* src values are in Q16 fixed point, convert to integer: */ + src_x = src_x >> 16; + src_y = src_y >> 16; + src_w = src_w >> 16; + src_h = src_h >> 16; + + src_img_w = min(fb->width, src_w); + src_img_h = min(fb->height, src_h); + + DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name, + fb->base.id, src_x, src_y, src_w, src_h, + crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); + + right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe; + if (right_hwpipe) { + /* + * if the plane comprises of 2 hw pipes, assume that the width + * is split equally across them. 
The only parameters that varies + * between the 2 pipes are src_x and crtc_x + */ + crtc_w /= 2; + src_w /= 2; + src_img_w /= 2; + } + + ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x); + if (ret) + return ret; + + ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y); + if (ret) + return ret; + + if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) { + calc_pixel_ext(format, src_w, crtc_w, step.x, + pe.left, pe.right, true); + calc_pixel_ext(format, src_h, crtc_h, step.y, + pe.top, pe.bottom, false); + } + + /* TODO calc hdecm, vdecm */ + + /* SCALE is used to both scale and up-sample chroma components */ + config |= get_scale_config(format, src_w, crtc_w, true); + config |= get_scale_config(format, src_h, crtc_h, false); + DBG("scale config = %x", config); + + rotation = drm_rotation_simplify(pstate->rotation, + DRM_MODE_ROTATE_0 | + DRM_MODE_REFLECT_X | + DRM_MODE_REFLECT_Y); + hflip = !!(rotation & DRM_MODE_REFLECT_X); + vflip = !!(rotation & DRM_MODE_REFLECT_Y); + + mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe, + config, hdecm, vdecm, hflip, vflip, + crtc_x, crtc_y, crtc_w, crtc_h, + src_img_w, src_img_h, + src_x, src_y, src_w, src_h); + if (right_hwpipe) + mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe, + config, hdecm, vdecm, hflip, vflip, + crtc_x + crtc_w, crtc_y, crtc_w, crtc_h, + src_img_w, src_img_h, + src_x + src_w, src_y, src_w, src_h); + + plane->fb = fb; + + return ret; +} + +/* + * Use this func and the one below only after the atomic state has been + * successfully swapped + */ +enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) +{ + struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); + + if (WARN_ON(!pstate->hwpipe)) + return SSPP_NONE; + + return pstate->hwpipe->pipe; +} + +enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane) +{ + struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); + + if (!pstate->r_hwpipe) + return SSPP_NONE; + + return pstate->r_hwpipe->pipe; +} + +uint32_t mdp5_plane_get_flush(struct drm_plane *plane) +{ + struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); + u32 mask; + + if (WARN_ON(!pstate->hwpipe)) + return 0; + + mask = pstate->hwpipe->flush_mask; + + if (pstate->r_hwpipe) + mask |= pstate->r_hwpipe->flush_mask; + + return mask; +} + +/* initialize plane */ +struct drm_plane *mdp5_plane_init(struct drm_device *dev, + enum drm_plane_type type) +{ + struct drm_plane *plane = NULL; + struct mdp5_plane *mdp5_plane; + int ret; + + mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); + if (!mdp5_plane) { + ret = -ENOMEM; + goto fail; + } + + plane = &mdp5_plane->base; + + mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, + ARRAY_SIZE(mdp5_plane->formats), false); + + ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, + mdp5_plane->formats, mdp5_plane->nformats, + NULL, type, NULL); + if (ret) + goto fail; + + drm_plane_helper_add(plane, &mdp5_plane_helper_funcs); + + mdp5_plane_install_properties(plane, &plane->base); + + return plane; + +fail: + if (plane) + mdp5_plane_destroy(plane); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c new file mode 100644 index 000000000000..ae4983d9d0a5 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + + +#include "mdp5_kms.h" +#include "mdp5_smp.h" + + +struct mdp5_smp { + struct drm_device *dev; + + uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */ + + int blk_cnt; + int blk_size; + + /* register cache */ + u32 alloc_w[22]; + u32 alloc_r[22]; + u32 pipe_reqprio_fifo_wm0[SSPP_MAX]; + u32 pipe_reqprio_fifo_wm1[SSPP_MAX]; + u32 pipe_reqprio_fifo_wm2[SSPP_MAX]; +}; + +static inline +struct mdp5_kms *get_kms(struct mdp5_smp *smp) +{ + struct msm_drm_private *priv = smp->dev->dev_private; + + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +static inline u32 pipe2client(enum mdp5_pipe pipe, int plane) +{ +#define CID_UNUSED 0 + + if (WARN_ON(plane >= pipe2nclients(pipe))) + return CID_UNUSED; + + /* + * Note on SMP clients: + * For ViG pipes, fetch Y/Cr/Cb-components clients are always + * consecutive, and in that order. + * + * e.g.: + * if mdp5_cfg->smp.clients[SSPP_VIG0] = N, + * Y plane's client ID is N + * Cr plane's client ID is N + 1 + * Cb plane's client ID is N + 2 + */ + + return mdp5_cfg->smp.clients[pipe] + plane; +} + +/* allocate blocks for the specified request: */ +static int smp_request_block(struct mdp5_smp *smp, + struct mdp5_smp_state *state, + u32 cid, int nblks) +{ + void *cs = state->client_state[cid]; + int i, avail, cnt = smp->blk_cnt; + uint8_t reserved; + + /* we shouldn't be requesting blocks for an in-use client: */ + WARN_ON(bitmap_weight(cs, cnt) > 0); + + reserved = smp->reserved[cid]; + + if (reserved) { + nblks = max(0, nblks - reserved); + DBG("%d MMBs allocated (%d reserved)", nblks, reserved); + } + + avail = cnt - bitmap_weight(state->state, cnt); + if (nblks > avail) { + dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n", + nblks, avail); + return -ENOSPC; + } + + for (i = 0; i < nblks; i++) { + int blk = find_first_zero_bit(state->state, cnt); + set_bit(blk, cs); + set_bit(blk, state->state); + } + + return 0; +} + +static void set_fifo_thresholds(struct mdp5_smp *smp, + enum mdp5_pipe pipe, int nblks) +{ + u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE); + u32 val; + + /* 1/4 of SMP pool that is being fetched */ + val = (nblks * smp_entries_per_blk) / 4; + + smp->pipe_reqprio_fifo_wm0[pipe] = val * 1; + smp->pipe_reqprio_fifo_wm1[pipe] = val * 2; + smp->pipe_reqprio_fifo_wm2[pipe] = val * 3; +} + +/* + * NOTE: looks like if horizontal decimation is used (if we supported that) + * then the width used to calculate SMP block requirements is the post- + * decimated width. Ie. SMP buffering sits downstream of decimation (which + * presumably happens during the dma from scanout buffer). 
+ */ +uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, + const struct mdp_format *format, + u32 width, bool hdecim) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); + int i, hsub, nplanes, nlines; + u32 fmt = format->base.pixel_format; + uint32_t blkcfg = 0; + + nplanes = drm_format_num_planes(fmt); + hsub = drm_format_horz_chroma_subsampling(fmt); + + /* different if BWC (compressed framebuffer?) enabled: */ + nlines = 2; + + /* Newer MDPs have split/packing logic, which fetches sub-sampled + * U and V components (splits them from Y if necessary) and packs + * them together, writes to SMP using a single client. + */ + if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { + fmt = DRM_FORMAT_NV24; + nplanes = 2; + + /* if decimation is enabled, HW decimates less on the + * sub sampled chroma components + */ + if (hdecim && (hsub > 1)) + hsub = 1; + } + + for (i = 0; i < nplanes; i++) { + int n, fetch_stride, cpp; + + cpp = drm_format_plane_cpp(fmt, i); + fetch_stride = width * cpp / (i ? hsub : 1); + + n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size); + + /* for hw rev v1.00 */ + if (rev == 0) + n = roundup_pow_of_two(n); + + blkcfg |= (n << (8 * i)); + } + + return blkcfg; +} + +int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe, uint32_t blkcfg) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + struct drm_device *dev = mdp5_kms->dev; + int i, ret; + + for (i = 0; i < pipe2nclients(pipe); i++) { + u32 cid = pipe2client(pipe, i); + int n = blkcfg & 0xff; + + if (!n) + continue; + + DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n); + ret = smp_request_block(smp, state, cid, n); + if (ret) { + dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n", + n, ret); + return ret; + } + + blkcfg >>= 8; + } + + state->assigned |= (1 << pipe); + + return 0; +} + +/* Release SMP blocks for all clients of the pipe */ +void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe) +{ + int i; + int cnt = smp->blk_cnt; + + for (i = 0; i < pipe2nclients(pipe); i++) { + u32 cid = pipe2client(pipe, i); + void *cs = state->client_state[cid]; + + /* update global state: */ + bitmap_andnot(state->state, state->state, cs, cnt); + + /* clear client's state */ + bitmap_zero(cs, cnt); + } + + state->released |= (1 << pipe); +} + +/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to + * happen after scanout completes. 
+ */ +static unsigned update_smp_state(struct mdp5_smp *smp, + u32 cid, mdp5_smp_state_t *assigned) +{ + int cnt = smp->blk_cnt; + unsigned nblks = 0; + u32 blk, val; + + for_each_set_bit(blk, *assigned, cnt) { + int idx = blk / 3; + int fld = blk % 3; + + val = smp->alloc_w[idx]; + + switch (fld) { + case 0: + val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; + val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid); + break; + case 1: + val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; + val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid); + break; + case 2: + val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; + val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid); + break; + } + + smp->alloc_w[idx] = val; + smp->alloc_r[idx] = val; + + nblks++; + } + + return nblks; +} + +static void write_smp_alloc_regs(struct mdp5_smp *smp) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + int i, num_regs; + + num_regs = smp->blk_cnt / 3 + 1; + + for (i = 0; i < num_regs; i++) { + mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i), + smp->alloc_w[i]); + mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i), + smp->alloc_r[i]); + } +} + +static void write_smp_fifo_regs(struct mdp5_smp *smp) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + int i; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; + enum mdp5_pipe pipe = hwpipe->pipe; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), + smp->pipe_reqprio_fifo_wm0[pipe]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), + smp->pipe_reqprio_fifo_wm1[pipe]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), + smp->pipe_reqprio_fifo_wm2[pipe]); + } +} + +void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) +{ + enum mdp5_pipe pipe; + + for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) { + unsigned i, nblks = 0; + + for (i = 0; i < pipe2nclients(pipe); i++) { + u32 cid = pipe2client(pipe, i); + void *cs = state->client_state[cid]; + + nblks += update_smp_state(smp, cid, cs); + + DBG("assign %s:%u, %u blks", + pipe2name(pipe), i, nblks); + } + + set_fifo_thresholds(smp, pipe, nblks); + } + + write_smp_alloc_regs(smp); + write_smp_fifo_regs(smp); + + state->assigned = 0; +} + +void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) +{ + enum mdp5_pipe pipe; + + for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) { + DBG("release %s", pipe2name(pipe)); + set_fifo_thresholds(smp, pipe, 0); + } + + write_smp_fifo_regs(smp); + + state->released = 0; +} + +void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + struct mdp5_hw_pipe_state *hwpstate; + struct mdp5_smp_state *state; + int total = 0, i, j; + + drm_printf(p, "name\tinuse\tplane\n"); + drm_printf(p, "----\t-----\t-----\n"); + + if (drm_can_sleep()) + drm_modeset_lock(&mdp5_kms->state_lock, NULL); + + /* grab these *after* we hold the state_lock */ + hwpstate = &mdp5_kms->state->hwpipe; + state = &mdp5_kms->state->smp; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; + struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx]; + enum mdp5_pipe pipe = hwpipe->pipe; + for (j = 0; j < pipe2nclients(pipe); j++) { + u32 cid = pipe2client(pipe, j); + void *cs = state->client_state[cid]; + int inuse = bitmap_weight(cs, smp->blk_cnt); + + drm_printf(p, "%s:%d\t%d\t%s\n", + pipe2name(pipe), j, inuse, + plane ? 
plane->name : NULL); + + total += inuse; + } + } + + drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt); + drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt - + bitmap_weight(state->state, smp->blk_cnt)); + + if (drm_can_sleep()) + drm_modeset_unlock(&mdp5_kms->state_lock); +} + +void mdp5_smp_destroy(struct mdp5_smp *smp) +{ + kfree(smp); +} + +struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg) +{ + struct mdp5_smp_state *state = &mdp5_kms->state->smp; + struct mdp5_smp *smp = NULL; + int ret; + + smp = kzalloc(sizeof(*smp), GFP_KERNEL); + if (unlikely(!smp)) { + ret = -ENOMEM; + goto fail; + } + + smp->dev = mdp5_kms->dev; + smp->blk_cnt = cfg->mmb_count; + smp->blk_size = cfg->mmb_size; + + /* statically tied MMBs cannot be re-allocated: */ + bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt); + memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved)); + + return smp; +fail: + if (smp) + mdp5_smp_destroy(smp); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h new file mode 100644 index 000000000000..b41d0448fbe8 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __MDP5_SMP_H__ +#define __MDP5_SMP_H__ + +#include + +#include "msm_drv.h" + +/* + * SMP - Shared Memory Pool: + * + * SMP blocks are shared between all the clients, where each plane in + * a scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on + * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR. + * + * Based on the size of the attached scanout buffer, a certain # of + * blocks must be allocated to that client out of the shared pool. + * + * In some hw, some blocks are statically allocated for certain pipes + * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). + * + * + * Atomic SMP State: + * + * On atomic updates that modify SMP configuration, the state is cloned + * (copied) and modified. For test-only, or in cases where atomic + * update fails (or if we hit ww_mutex deadlock/backoff condition) the + * new state is simply thrown away. + * + * Because the SMP registers are not double buffered, updates are a + * two step process: + * + * 1) in _prepare_commit() we configure things (via read-modify-write) + * for the newly assigned pipes, so we don't take away blocks + * assigned to pipes that are still scanning out + * 2) in _complete_commit(), after vblank/etc, we clear things for the + * released clients, since at that point old pipes are no longer + * scanning out. 
+ */ +struct mdp5_smp_state { + /* global state of what blocks are in use: */ + mdp5_smp_state_t state; + + /* per client state of what blocks they are using: */ + mdp5_smp_state_t client_state[MAX_CLIENTS]; + + /* assigned pipes (hw updated at _prepare_commit()): */ + unsigned long assigned; + + /* released pipes (hw updated at _complete_commit()): */ + unsigned long released; +}; + +struct mdp5_kms; +struct mdp5_smp; + +/* + * SMP module prototypes: + * mdp5_smp_init() returns a SMP @handler, + * which is then used to call the other mdp5_smp_*(handler, ...) functions. + */ + +struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, + const struct mdp5_smp_block *cfg); +void mdp5_smp_destroy(struct mdp5_smp *smp); + +void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p); + +uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, + const struct mdp_format *format, + u32 width, bool hdecim); + +int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe, uint32_t blkcfg); +void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe); + +void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state); +void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state); + +#endif /* __MDP5_SMP_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h new file mode 100644 index 000000000000..1494c407be44 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h @@ -0,0 +1,104 @@ +#ifndef MDP_COMMON_XML +#define MDP_COMMON_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27) + +Copyright (C) 2013-2017 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the 
following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + + +enum mdp_chroma_samp_type { + CHROMA_FULL = 0, + CHROMA_H2V1 = 1, + CHROMA_H1V2 = 2, + CHROMA_420 = 3, +}; + +enum mdp_fetch_type { + MDP_PLANE_INTERLEAVED = 0, + MDP_PLANE_PLANAR = 1, + MDP_PLANE_PSEUDO_PLANAR = 2, +}; + +enum mdp_mixer_stage_id { + STAGE_UNUSED = 0, + STAGE_BASE = 1, + STAGE0 = 2, + STAGE1 = 3, + STAGE2 = 4, + STAGE3 = 5, + STAGE4 = 6, + STAGE5 = 7, + STAGE6 = 8, + STAGE_MAX = 8, +}; + +enum mdp_alpha_type { + FG_CONST = 0, + BG_CONST = 1, + FG_PIXEL = 2, + BG_PIXEL = 3, +}; + +enum mdp_component_type { + COMP_0 = 0, + COMP_1_2 = 1, + COMP_3 = 2, + COMP_MAX = 3, +}; + +enum mdp_bpc { + BPC1 = 0, + BPC5 = 1, + BPC6 = 2, + BPC8 = 3, +}; + +enum mdp_bpc_alpha { + BPC1A = 0, + BPC4A = 1, + BPC6A = 2, + BPC8A = 3, +}; + + +#endif /* MDP_COMMON_XML */ diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c new file mode 100644 index 000000000000..b4a8aa4490ee --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp_format.c @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + + +#include "msm_drv.h" +#include "mdp_kms.h" + +static struct csc_cfg csc_convert[CSC_MAX] = { + [CSC_RGB2RGB] = { + .type = CSC_RGB2RGB, + .matrix = { + 0x0200, 0x0000, 0x0000, + 0x0000, 0x0200, 0x0000, + 0x0000, 0x0000, 0x0200 + }, + .pre_bias = { 0x0, 0x0, 0x0 }, + .post_bias = { 0x0, 0x0, 0x0 }, + .pre_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff }, + .post_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff }, + }, + [CSC_YUV2RGB] = { + .type = CSC_YUV2RGB, + .matrix = { + 0x0254, 0x0000, 0x0331, + 0x0254, 0xff37, 0xfe60, + 0x0254, 0x0409, 0x0000 + }, + .pre_bias = { 0xfff0, 0xff80, 0xff80 }, + .post_bias = { 0x00, 0x00, 0x00 }, + .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + }, + [CSC_RGB2YUV] = { + .type = CSC_RGB2YUV, + .matrix = { + 0x0083, 0x0102, 0x0032, + 0x1fb5, 0x1f6c, 0x00e1, + 0x00e1, 0x1f45, 0x1fdc + }, + .pre_bias = { 0x00, 0x00, 0x00 }, + .post_bias = { 0x10, 0x80, 0x80 }, + .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + .post_clamp = { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0 }, + }, + [CSC_YUV2YUV] = { + .type = CSC_YUV2YUV, + .matrix = { + 0x0200, 0x0000, 0x0000, + 0x0000, 0x0200, 0x0000, + 0x0000, 0x0000, 0x0200 + }, + .pre_bias = { 0x00, 0x00, 0x00 }, + .post_bias = { 0x00, 0x00, 0x00 }, + .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + }, +}; + +#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \ + .base = { .pixel_format = DRM_FORMAT_ ## name }, \ + .bpc_a = BPC ## a ## A, \ + .bpc_r = BPC ## r, \ + .bpc_g = BPC ## g, \ + .bpc_b = BPC ## b, \ + .unpack = { e0, e1, e2, e3 }, \ + .alpha_enable = alpha, \ + .unpack_tight = tight, \ + .cpp = c, \ + .unpack_count = cnt, \ + .fetch_type = fp, \ + .chroma_sample = cs, \ + .is_yuv = yuv, \ +} + +#define BPC0A 0 + +/* + * Note: Keep RGB formats 1st, followed by YUV formats to avoid breaking + * mdp_get_rgb_formats()'s implementation. + */ +static const struct mdp_format formats[] = { + /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... 
*/ + FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + + /* --- RGB formats above / YUV formats below this line --- */ + + /* 2 plane YUV */ + FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), + FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), + FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), + FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), + /* 1 plane YUV */ + FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + /* 3 plane YUV */ + FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1, + MDP_PLANE_PLANAR, CHROMA_420, true), + FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1, + MDP_PLANE_PLANAR, CHROMA_420, true), +}; + +/* + * Note: + * @rgb_only must be set to true, when requesting + * supported formats for RGB pipes. 
+ */ +uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats, + bool rgb_only) +{ + uint32_t i; + for (i = 0; i < ARRAY_SIZE(formats); i++) { + const struct mdp_format *f = &formats[i]; + + if (i == max_formats) + break; + + if (rgb_only && MDP_FORMAT_IS_YUV(f)) + break; + + pixel_formats[i] = f->base.pixel_format; + } + + return i; +} + +const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) +{ + int i; + for (i = 0; i < ARRAY_SIZE(formats); i++) { + const struct mdp_format *f = &formats[i]; + if (f->base.pixel_format == format) + return &f->base; + } + return NULL; +} + +struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type) +{ + if (unlikely(WARN_ON(type >= CSC_MAX))) + return NULL; + + return &csc_convert[type]; +} diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.c b/drivers/gpu/drm/msm/disp/mdp_kms.c new file mode 100644 index 000000000000..64287304054d --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp_kms.c @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + + +#include "msm_drv.h" +#include "mdp_kms.h" + + +struct mdp_irq_wait { + struct mdp_irq irq; + int count; +}; + +static DECLARE_WAIT_QUEUE_HEAD(wait_event); + +static DEFINE_SPINLOCK(list_lock); + +static void update_irq(struct mdp_kms *mdp_kms) +{ + struct mdp_irq *irq; + uint32_t irqmask = mdp_kms->vblank_mask; + + assert_spin_locked(&list_lock); + + list_for_each_entry(irq, &mdp_kms->irq_list, node) + irqmask |= irq->irqmask; + + mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask); + mdp_kms->cur_irq_mask = irqmask; +} + +/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder + * link changes, this must be called to figure out the new global irqmask + */ +void mdp_irq_update(struct mdp_kms *mdp_kms) +{ + unsigned long flags; + spin_lock_irqsave(&list_lock, flags); + update_irq(mdp_kms); + spin_unlock_irqrestore(&list_lock, flags); +} + +void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status) +{ + struct mdp_irq *handler, *n; + unsigned long flags; + + spin_lock_irqsave(&list_lock, flags); + mdp_kms->in_irq = true; + list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) { + if (handler->irqmask & status) { + spin_unlock_irqrestore(&list_lock, flags); + handler->irq(handler, handler->irqmask & status); + spin_lock_irqsave(&list_lock, flags); + } + } + mdp_kms->in_irq = false; + update_irq(mdp_kms); + spin_unlock_irqrestore(&list_lock, flags); + +} + +void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable) +{ + unsigned long flags; + + spin_lock_irqsave(&list_lock, flags); + if (enable) + mdp_kms->vblank_mask |= mask; + else + mdp_kms->vblank_mask &= ~mask; + update_irq(mdp_kms); + spin_unlock_irqrestore(&list_lock, flags); +} + +static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp_irq_wait *wait = + container_of(irq, struct mdp_irq_wait, irq); + wait->count--; + wake_up_all(&wait_event); +} + +void 
mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask) +{ + struct mdp_irq_wait wait = { + .irq = { + .irq = wait_irq, + .irqmask = irqmask, + }, + .count = 1, + }; + mdp_irq_register(mdp_kms, &wait.irq); + wait_event_timeout(wait_event, (wait.count <= 0), + msecs_to_jiffies(100)); + mdp_irq_unregister(mdp_kms, &wait.irq); +} + +void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq) +{ + unsigned long flags; + bool needs_update = false; + + spin_lock_irqsave(&list_lock, flags); + + if (!irq->registered) { + irq->registered = true; + list_add(&irq->node, &mdp_kms->irq_list); + needs_update = !mdp_kms->in_irq; + } + + spin_unlock_irqrestore(&list_lock, flags); + + if (needs_update) + mdp_irq_update(mdp_kms); +} + +void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq) +{ + unsigned long flags; + bool needs_update = false; + + spin_lock_irqsave(&list_lock, flags); + + if (irq->registered) { + irq->registered = false; + list_del(&irq->node); + needs_update = !mdp_kms->in_irq; + } + + spin_unlock_irqrestore(&list_lock, flags); + + if (needs_update) + mdp_irq_update(mdp_kms); +} diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h new file mode 100644 index 000000000000..1185487e7e5e --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp_kms.h @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __MDP_KMS_H__ +#define __MDP_KMS_H__ + +#include +#include +#include + +#include "msm_drv.h" +#include "msm_kms.h" +#include "mdp_common.xml.h" + +struct mdp_kms; + +struct mdp_kms_funcs { + struct msm_kms_funcs base; + void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); +}; + +struct mdp_kms { + struct msm_kms base; + + const struct mdp_kms_funcs *funcs; + + /* irq handling: */ + bool in_irq; + struct list_head irq_list; /* list of mdp4_irq */ + uint32_t vblank_mask; /* irq bits set for userspace vblank */ + uint32_t cur_irq_mask; /* current irq mask */ +}; +#define to_mdp_kms(x) container_of(x, struct mdp_kms, base) + +static inline void mdp_kms_init(struct mdp_kms *mdp_kms, + const struct mdp_kms_funcs *funcs) +{ + mdp_kms->funcs = funcs; + INIT_LIST_HEAD(&mdp_kms->irq_list); + msm_kms_init(&mdp_kms->base, &funcs->base); +} + +/* + * irq helpers: + */ + +/* For transiently registering for different MDP irqs that various parts + * of the KMS code need during setup/configuration. These are not + * necessarily the same as what drm_vblank_get/put() are requesting, and + * the hysteresis in drm_vblank_put() is not necessarily desirable for + * internal housekeeping related irq usage. 
+ */ +struct mdp_irq { + struct list_head node; + uint32_t irqmask; + bool registered; + void (*irq)(struct mdp_irq *irq, uint32_t irqstatus); +}; + +void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status); +void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable); +void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask); +void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq); +void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq); +void mdp_irq_update(struct mdp_kms *mdp_kms); + +/* + * pixel format helpers: + */ + +struct mdp_format { + struct msm_format base; + enum mdp_bpc bpc_r, bpc_g, bpc_b; + enum mdp_bpc_alpha bpc_a; + uint8_t unpack[4]; + bool alpha_enable, unpack_tight; + uint8_t cpp, unpack_count; + enum mdp_fetch_type fetch_type; + enum mdp_chroma_samp_type chroma_sample; + bool is_yuv; +}; +#define to_mdp_format(x) container_of(x, struct mdp_format, base) +#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) + +uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); +const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); + +/* MDP capabilities */ +#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ +#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */ +#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */ +#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */ + +/* MDP pipe capabilities */ +#define MDP_PIPE_CAP_HFLIP BIT(0) +#define MDP_PIPE_CAP_VFLIP BIT(1) +#define MDP_PIPE_CAP_SCALE BIT(2) +#define MDP_PIPE_CAP_CSC BIT(3) +#define MDP_PIPE_CAP_DECIMATION BIT(4) +#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) +#define MDP_PIPE_CAP_CURSOR BIT(6) + +/* MDP layer mixer caps */ +#define MDP_LM_CAP_DISPLAY BIT(0) +#define MDP_LM_CAP_WB BIT(1) +#define MDP_LM_CAP_PAIR BIT(2) + +static inline bool pipe_supports_yuv(uint32_t pipe_caps) +{ + return (pipe_caps & MDP_PIPE_CAP_SCALE) && + (pipe_caps & MDP_PIPE_CAP_CSC); +} + +enum csc_type { + CSC_RGB2RGB = 0, + CSC_YUV2RGB, + CSC_RGB2YUV, + CSC_YUV2YUV, + CSC_MAX +}; + +struct csc_cfg { + enum csc_type type; + uint32_t matrix[9]; + uint32_t pre_bias[3]; + uint32_t post_bias[3]; + uint32_t pre_clamp[6]; + uint32_t post_clamp[6]; +}; + +struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type); + +#endif /* __MDP_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h deleted file mode 100644 index 576cea30d391..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h +++ /dev/null @@ -1,1174 +0,0 @@ -#ifndef MDP4_XML -#define MDP4_XML - -/* Autogenerated file, DO NOT EDIT manually! 
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27) - -Copyright (C) 2013-2017 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - - -enum mdp4_pipe { - VG1 = 0, - VG2 = 1, - RGB1 = 2, - RGB2 = 3, - RGB3 = 4, - VG3 = 5, - VG4 = 6, -}; - -enum mdp4_mixer { - MIXER0 = 0, - MIXER1 = 1, - MIXER2 = 2, -}; - -enum mdp4_intf { - INTF_LCDC_DTV = 0, - INTF_DSI_VIDEO = 1, - INTF_DSI_CMD = 2, - INTF_EBI2_TV = 3, -}; - -enum mdp4_cursor_format { - CURSOR_ARGB = 1, - CURSOR_XRGB = 2, -}; - -enum mdp4_frame_format { - FRAME_LINEAR = 0, - FRAME_TILE_ARGB_4X4 = 1, - FRAME_TILE_YCBCR_420 = 2, -}; - -enum mdp4_scale_unit { - SCALE_FIR = 0, - SCALE_MN_PHASE = 1, - SCALE_PIXEL_RPT = 2, -}; - -enum mdp4_dma { - DMA_P = 0, - DMA_S = 1, - DMA_E = 2, -}; - -#define MDP4_IRQ_OVERLAY0_DONE 0x00000001 -#define MDP4_IRQ_OVERLAY1_DONE 0x00000002 -#define MDP4_IRQ_DMA_S_DONE 0x00000004 -#define MDP4_IRQ_DMA_E_DONE 0x00000008 -#define MDP4_IRQ_DMA_P_DONE 0x00000010 -#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020 -#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040 -#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080 -#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100 -#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200 -#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400 -#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800 -#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000 -#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000 -#define MDP4_IRQ_OVERLAY2_DONE 0x40000000 -#define REG_MDP4_VERSION 0x00000000 -#define MDP4_VERSION_MINOR__MASK 0x00ff0000 -#define MDP4_VERSION_MINOR__SHIFT 16 -static inline uint32_t MDP4_VERSION_MINOR(uint32_t val) -{ - return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK; -} -#define MDP4_VERSION_MAJOR__MASK 0xff000000 -#define MDP4_VERSION_MAJOR__SHIFT 24 -static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val) -{ - return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK; -} - -#define REG_MDP4_OVLP0_KICK 0x00000004 - -#define REG_MDP4_OVLP1_KICK 0x00000008 - -#define REG_MDP4_OVLP2_KICK 0x000000d0 - -#define REG_MDP4_DMA_P_KICK 0x0000000c - -#define REG_MDP4_DMA_S_KICK 0x00000010 - -#define REG_MDP4_DMA_E_KICK 0x00000014 - -#define REG_MDP4_DISP_STATUS 0x00000018 - -#define REG_MDP4_DISP_INTF_SEL 0x00000038 -#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003 -#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0 -static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val) -{ - return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK; -} -#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c -#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2 -static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val) -{ - return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK; -} -#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030 -#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4 -static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val) -{ - return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK; -} -#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040 -#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080 - -#define REG_MDP4_RESET_STATUS 0x0000003c - -#define REG_MDP4_READ_CNFG 0x0000004c - -#define REG_MDP4_INTR_ENABLE 0x00000050 - -#define REG_MDP4_INTR_STATUS 0x00000054 - -#define REG_MDP4_INTR_CLEAR 0x00000058 - -#define REG_MDP4_EBI2_LCD0 0x00000060 - -#define REG_MDP4_EBI2_LCD1 0x00000064 - -#define REG_MDP4_PORTMAP_MODE 0x00000070 - -#define REG_MDP4_CS_CONTROLLER0 0x000000c0 - -#define REG_MDP4_CS_CONTROLLER1 0x000000c4 - -#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 -static inline uint32_t 
MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000 - -#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc - -#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 -#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 -#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 -#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 -#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 -#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 -#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 -static inline 
uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 -#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000 - -#define REG_MDP4_VG2_SRC_FORMAT 0x00030050 - -#define REG_MDP4_VG2_CONST_COLOR 0x00031008 - -#define REG_MDP4_OVERLAY_FLUSH 0x00018000 -#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001 -#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002 -#define MDP4_OVERLAY_FLUSH_VG1 0x00000004 -#define MDP4_OVERLAY_FLUSH_VG2 0x00000008 -#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010 -#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020 - -static inline uint32_t __offset_OVLP(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00010000; - case 1: return 0x00018000; - case 2: return 0x00088000; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); } -#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK; -} -#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t 
i0) { return 0x00000014 + __offset_OVLP(i0); } - -static inline uint32_t __offset_STAGE(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00000104; - case 1: return 0x00000124; - case 2: return 0x00000144; - case 3: return 0x00000160; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } -#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 -#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 -static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val) -{ - return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; -} -#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004 -#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 -#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 -#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 -static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val) -{ - return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; -} -#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040 -#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080 -#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100 -#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200 - -static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t __offset_STAGE_CO3(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00001004; - case 1: return 0x00001404; - case 2: return 0x00001804; - case 3: return 0x00001b84; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); } -#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001 - -static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); } - - -static inline uint32_t 
REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; } - -#define REG_MDP4_DMA_P_OP_MODE 0x00090070 - -static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; } - -static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; } - -#define REG_MDP4_DMA_S_OP_MODE 0x000a0028 - -static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; } - -static inline uint32_t __offset_DMA(enum mdp4_dma idx) -{ - switch (idx) { - case DMA_P: return 0x00090000; - case DMA_S: return 0x000a0000; - case DMA_E: return 0x000b0000; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } -#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 -#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 -static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; -} -#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c -#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 -static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; -} -#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 -#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 -static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; -} -#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080 -#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00 -#define MDP4_DMA_CONFIG_PACK__SHIFT 8 -static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val) -{ - return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK; -} -#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000 -#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000 - -static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); } -#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val) 
-{ - return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK; -} -#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); } -#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK; -} -#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); } -#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f -#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK; -} -#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000 -#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK; -} - -static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); } -#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff -#define MDP4_DMA_CURSOR_POS_X__SHIFT 0 -static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val) -{ - return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK; -} -#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000 -#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16 -static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val) -{ - return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK; -} - -static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); } -#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001 -#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006 -#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1 -static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val) -{ - return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK; -} -#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008 - -static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma 
i0) { return 0x00003000 + __offset_DMA(i0); } - - -static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; } -#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK; -} -#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; } -#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000 -#define MDP4_PIPE_SRC_XY_Y__SHIFT 16 -static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK; -} -#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff -#define MDP4_PIPE_SRC_XY_X__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; } -#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK; -} -#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; } -#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000 -#define MDP4_PIPE_DST_XY_Y__SHIFT 16 -static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val) -{ - return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK; 
-} -#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff -#define MDP4_PIPE_DST_XY_X__SHIFT 0 -static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val) -{ - return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SRCP3_BASE(enum mdp4_pipe i0) { return 0x0002001c + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; } -#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff -#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK; -} -#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000 -#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16 -static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; } -#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff -#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK; -} -#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000 -#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16 -static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SSTILE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; } -#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK; -} -#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; } -#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 -#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c -#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 -#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 -#define 
MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100 -#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600 -#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000 -#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000 -#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 -#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 -#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK 0x00180000 -#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT 19 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT) & MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000 -#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x0c000000 -#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 26 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK 0x60000000 -#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT 29 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(enum mdp4_frame_format val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT) & MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; } -#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff -#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK; -} -#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00 -#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8 -static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK; -} -#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000 -#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16 -static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK; -} -#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000 -#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24 -static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; } -#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001 -#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002 -#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK 0x0000000c -#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT 2 -static inline uint32_t MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(enum mdp4_scale_unit val) -{ - return ((val) << MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT) & 
MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK; -} -#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK 0x00000030 -#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT 4 -static inline uint32_t MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(enum mdp4_scale_unit val) -{ - return ((val) << MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK; -} -#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200 -#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400 -#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800 -#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000 -#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000 -#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000 -#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000 -#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000 -#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000 - -static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; } - - -static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } - -#define REG_MDP4_LCDC 0x000c0000 - -#define REG_MDP4_LCDC_ENABLE 0x000c0000 - -#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004 -#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff -#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0 -static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val) -{ - return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK; -} -#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000 -#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16 -static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val) -{ - return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK; -} - -#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008 - -#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c - -#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010 -#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff -#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0 -static inline uint32_t 
MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val) -{ - return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK; -} -#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000 -#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16 -static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val) -{ - return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK; -} - -#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014 - -#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018 - -#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c -#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff -#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0 -static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val) -{ - return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK; -} -#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000 -#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16 -static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val) -{ - return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK; -} -#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 - -#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020 - -#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024 - -#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028 - -#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c -#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff -#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0 -static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val) -{ - return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK; -} -#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 - -#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030 - -#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034 - -#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038 -#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001 -#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002 -#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004 - -#define REG_MDP4_LCDC_LVDS_INTF_CTL 0x000c2000 -#define MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL 0x00000004 -#define MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT 0x00000008 -#define MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP 0x00000010 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_RES_BIT 0x00000020 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_RES_BIT 0x00000040 -#define MDP4_LCDC_LVDS_INTF_CTL_ENABLE 0x00000080 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN 0x00000100 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN 0x00000200 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN 0x00000400 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN 0x00000800 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN 0x00001000 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN 0x00002000 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN 0x00004000 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN 0x00008000 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN 0x00010000 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN 0x00020000 - -static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL(uint32_t i0) { return 0x000c2014 + 0x8*i0; } - -static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(uint32_t i0) { return 0x000c2014 + 0x8*i0; } -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK 0x000000ff -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT 0 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK; -} -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK 0x0000ff00 -#define 
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT 8 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK; -} -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK 0x00ff0000 -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT 16 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK; -} -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK 0xff000000 -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT 24 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK; -} - -static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(uint32_t i0) { return 0x000c2018 + 0x8*i0; } -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK 0x000000ff -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT 0 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK; -} -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK 0x0000ff00 -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT 8 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK; -} -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK 0x00ff0000 -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT 16 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK; -} - -#define REG_MDP4_LCDC_LVDS_PHY_RESET 0x000c2034 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_0 0x000c3000 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_1 0x000c3004 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_2 0x000c3008 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_3 0x000c300c - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_5 0x000c3014 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_6 0x000c3018 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_7 0x000c301c - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_8 0x000c3020 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_9 0x000c3024 - -#define REG_MDP4_LVDS_PHY_PLL_LOCKED 0x000c3080 - -#define REG_MDP4_LVDS_PHY_CFG2 0x000c3108 - -#define REG_MDP4_LVDS_PHY_CFG0 0x000c3100 -#define MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE 0x00000010 -#define MDP4_LVDS_PHY_CFG0_CHANNEL0 0x00000040 -#define MDP4_LVDS_PHY_CFG0_CHANNEL1 0x00000080 - -#define REG_MDP4_DTV 0x000d0000 - -#define REG_MDP4_DTV_ENABLE 0x000d0000 - -#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004 -#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff -#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0 -static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val) -{ - return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK; -} -#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000 -#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16 -static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val) -{ - return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK; -} - -#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008 - -#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c - -#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018 -#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff -#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0 -static inline uint32_t 
MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val) -{ - return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK; -} -#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000 -#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16 -static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val) -{ - return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK; -} - -#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c - -#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020 - -#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c -#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff -#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0 -static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val) -{ - return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK; -} -#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000 -#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16 -static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val) -{ - return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK; -} -#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 - -#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030 - -#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038 - -#define REG_MDP4_DTV_BORDER_CLR 0x000d0040 - -#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044 -#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff -#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0 -static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val) -{ - return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK; -} -#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 - -#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048 - -#define REG_MDP4_DTV_TEST_CNTL 0x000d004c - -#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050 -#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001 -#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002 -#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004 - -#define REG_MDP4_DSI 0x000e0000 - -#define REG_MDP4_DSI_ENABLE 0x000e0000 - -#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004 -#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff -#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0 -static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val) -{ - return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK; -} -#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000 -#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16 -static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val) -{ - return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK; -} - -#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008 - -#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c - -#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010 -#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff -#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0 -static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val) -{ - return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK; -} -#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000 -#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16 -static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val) -{ - return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK; -} - -#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014 - -#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018 - -#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c -#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff -#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0 -static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val) 
-{ - return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK; -} -#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000 -#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16 -static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val) -{ - return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK; -} -#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 - -#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020 - -#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024 - -#define REG_MDP4_DSI_BORDER_CLR 0x000e0028 - -#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c -#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff -#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0 -static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val) -{ - return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK; -} -#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 - -#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030 - -#define REG_MDP4_DSI_TEST_CNTL 0x000e0034 - -#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038 -#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001 -#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002 -#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004 - - -#endif /* MDP4_XML */ diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c deleted file mode 100644 index 6e5e1aa54ce1..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ /dev/null @@ -1,670 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include -#include -#include -#include - -#include "mdp4_kms.h" - -struct mdp4_crtc { - struct drm_crtc base; - char name[8]; - int id; - int ovlp; - enum mdp4_dma dma; - bool enabled; - - /* which mixer/encoder we route output to: */ - int mixer; - - struct { - spinlock_t lock; - bool stale; - uint32_t width, height; - uint32_t x, y; - - /* next cursor to scan-out: */ - uint32_t next_iova; - struct drm_gem_object *next_bo; - - /* current cursor being scanned out: */ - struct drm_gem_object *scanout_bo; - } cursor; - - - /* if there is a pending flip, these will be non-null: */ - struct drm_pending_vblank_event *event; - - /* Bits have been flushed at the last commit, - * used to decide if a vsync has happened since last commit. 
- */ - u32 flushed_mask; - -#define PENDING_CURSOR 0x1 -#define PENDING_FLIP 0x2 - atomic_t pending; - - /* for unref'ing cursor bo's after scanout completes: */ - struct drm_flip_work unref_cursor_work; - - struct mdp_irq vblank; - struct mdp_irq err; -}; -#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base) - -static struct mdp4_kms *get_kms(struct drm_crtc *crtc) -{ - struct msm_drm_private *priv = crtc->dev->dev_private; - return to_mdp4_kms(to_mdp_kms(priv->kms)); -} - -static void request_pending(struct drm_crtc *crtc, uint32_t pending) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - - atomic_or(pending, &mdp4_crtc->pending); - mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); -} - -static void crtc_flush(struct drm_crtc *crtc) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - struct drm_plane *plane; - uint32_t flush = 0; - - drm_atomic_crtc_for_each_plane(plane, crtc) { - enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); - flush |= pipe2flush(pipe_id); - } - - flush |= ovlp2flush(mdp4_crtc->ovlp); - - DBG("%s: flush=%08x", mdp4_crtc->name, flush); - - mdp4_crtc->flushed_mask = flush; - - mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); -} - -/* if file!=NULL, this is preclose potential cancel-flip path */ -static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct drm_pending_vblank_event *event; - unsigned long flags; - - spin_lock_irqsave(&dev->event_lock, flags); - event = mdp4_crtc->event; - if (event) { - mdp4_crtc->event = NULL; - DBG("%s: send event: %p", mdp4_crtc->name, event); - drm_crtc_send_vblank_event(crtc, event); - } - spin_unlock_irqrestore(&dev->event_lock, flags); -} - -static void unref_cursor_worker(struct drm_flip_work *work, void *val) -{ - struct mdp4_crtc *mdp4_crtc = - container_of(work, struct mdp4_crtc, unref_cursor_work); - struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base); - struct msm_kms *kms = &mdp4_kms->base.base; - - msm_gem_put_iova(val, kms->aspace); - drm_gem_object_put_unlocked(val); -} - -static void mdp4_crtc_destroy(struct drm_crtc *crtc) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - - drm_crtc_cleanup(crtc); - drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); - - kfree(mdp4_crtc); -} - -/* statically (for now) map planes to mixer stage (z-order): */ -static const int idxs[] = { - [VG1] = 1, - [VG2] = 2, - [RGB1] = 0, - [RGB2] = 0, - [RGB3] = 0, - [VG3] = 3, - [VG4] = 4, - -}; - -/* setup mixer config, for which we need to consider all crtc's and - * the planes attached to them - * - * TODO may possibly need some extra locking here - */ -static void setup_mixer(struct mdp4_kms *mdp4_kms) -{ - struct drm_mode_config *config = &mdp4_kms->dev->mode_config; - struct drm_crtc *crtc; - uint32_t mixer_cfg = 0; - static const enum mdp_mixer_stage_id stages[] = { - STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, - }; - - list_for_each_entry(crtc, &config->crtc_list, head) { - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct drm_plane *plane; - - drm_atomic_crtc_for_each_plane(plane, crtc) { - enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); - int idx = idxs[pipe_id]; - mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer, - pipe_id, stages[idx]); - } - } - - mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg); -} - -static void blend_setup(struct drm_crtc *crtc) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct 
mdp4_kms *mdp4_kms = get_kms(crtc); - struct drm_plane *plane; - int i, ovlp = mdp4_crtc->ovlp; - bool alpha[4]= { false, false, false, false }; - - mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0); - - drm_atomic_crtc_for_each_plane(plane, crtc) { - enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); - int idx = idxs[pipe_id]; - if (idx > 0) { - const struct mdp_format *format = - to_mdp_format(msm_framebuffer_format(plane->fb)); - alpha[idx-1] = format->alpha_enable; - } - } - - for (i = 0; i < 4; i++) { - uint32_t op; - - if (alpha[i]) { - op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) | - MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) | - MDP4_OVLP_STAGE_OP_BG_INV_ALPHA; - } else { - op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) | - MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST); - } - - mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0); - } - - setup_mixer(mdp4_kms); -} - -static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - enum mdp4_dma dma = mdp4_crtc->dma; - int ovlp = mdp4_crtc->ovlp; - struct drm_display_mode *mode; - - if (WARN_ON(!crtc->state)) - return; - - mode = &crtc->state->adjusted_mode; - - DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - mdp4_crtc->name, mode->base.id, mode->name, - mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, - mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, - mode->vsync_end, mode->vtotal, - mode->type, mode->flags); - - mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), - MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | - MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); - - /* take data from pipe: */ - mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0); - mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0); - mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma), - MDP4_DMA_DST_SIZE_WIDTH(0) | - MDP4_DMA_DST_SIZE_HEIGHT(0)); - - mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp), - MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) | - MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay)); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0); - - mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); - - if (dma == DMA_E) { - mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); - mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); - mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); - } -} - -static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - - DBG("%s", mdp4_crtc->name); - - if (WARN_ON(!mdp4_crtc->enabled)) - return; - - /* Disable/save vblank irq handling before power is disabled */ - drm_crtc_vblank_off(crtc); - - mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); - 
mdp4_disable(mdp4_kms); - - mdp4_crtc->enabled = false; -} - -static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - - DBG("%s", mdp4_crtc->name); - - if (WARN_ON(mdp4_crtc->enabled)) - return; - - mdp4_enable(mdp4_kms); - - /* Restore vblank irq handling after power is enabled */ - drm_crtc_vblank_on(crtc); - - mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err); - - crtc_flush(crtc); - - mdp4_crtc->enabled = true; -} - -static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - DBG("%s: check", mdp4_crtc->name); - // TODO anything else to check? - return 0; -} - -static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - DBG("%s: begin", mdp4_crtc->name); -} - -static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct drm_device *dev = crtc->dev; - unsigned long flags; - - DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event); - - WARN_ON(mdp4_crtc->event); - - spin_lock_irqsave(&dev->event_lock, flags); - mdp4_crtc->event = crtc->state->event; - spin_unlock_irqrestore(&dev->event_lock, flags); - - blend_setup(crtc); - crtc_flush(crtc); - request_pending(crtc, PENDING_FLIP); -} - -#define CURSOR_WIDTH 64 -#define CURSOR_HEIGHT 64 - -/* called from IRQ to update cursor related registers (if needed). The - * cursor registers, other than x/y position, appear not to be double - * buffered, and changing them other than from vblank seems to trigger - * underflow. 
- */ -static void update_cursor(struct drm_crtc *crtc) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - struct msm_kms *kms = &mdp4_kms->base.base; - enum mdp4_dma dma = mdp4_crtc->dma; - unsigned long flags; - - spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); - if (mdp4_crtc->cursor.stale) { - struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; - struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; - uint64_t iova = mdp4_crtc->cursor.next_iova; - - if (next_bo) { - /* take a obj ref + iova ref when we start scanning out: */ - drm_gem_object_get(next_bo); - msm_gem_get_iova(next_bo, kms->aspace, &iova); - - /* enable cursor: */ - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma), - MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) | - MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height)); - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova); - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma), - MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) | - MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN); - } else { - /* disable cursor: */ - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), - mdp4_kms->blank_cursor_iova); - } - - /* and drop the iova ref + obj rev when done scanning out: */ - if (prev_bo) - drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo); - - mdp4_crtc->cursor.scanout_bo = next_bo; - mdp4_crtc->cursor.stale = false; - } - - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), - MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) | - MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y)); - - spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); -} - -static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, uint32_t handle, - uint32_t width, uint32_t height) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - struct msm_kms *kms = &mdp4_kms->base.base; - struct drm_device *dev = crtc->dev; - struct drm_gem_object *cursor_bo, *old_bo; - unsigned long flags; - uint64_t iova; - int ret; - - if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { - dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height); - return -EINVAL; - } - - if (handle) { - cursor_bo = drm_gem_object_lookup(file_priv, handle); - if (!cursor_bo) - return -ENOENT; - } else { - cursor_bo = NULL; - } - - if (cursor_bo) { - ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova); - if (ret) - goto fail; - } else { - iova = 0; - } - - spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); - old_bo = mdp4_crtc->cursor.next_bo; - mdp4_crtc->cursor.next_bo = cursor_bo; - mdp4_crtc->cursor.next_iova = iova; - mdp4_crtc->cursor.width = width; - mdp4_crtc->cursor.height = height; - mdp4_crtc->cursor.stale = true; - spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); - - if (old_bo) { - /* drop our previous reference: */ - drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo); - } - - request_pending(crtc, PENDING_CURSOR); - - return 0; - -fail: - drm_gem_object_put_unlocked(cursor_bo); - return ret; -} - -static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - unsigned long flags; - - spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); - mdp4_crtc->cursor.x = x; - mdp4_crtc->cursor.y = y; - spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); - - crtc_flush(crtc); - request_pending(crtc, PENDING_CURSOR); - - return 0; -} - -static const struct drm_crtc_funcs mdp4_crtc_funcs = { - .set_config 
= drm_atomic_helper_set_config, - .destroy = mdp4_crtc_destroy, - .page_flip = drm_atomic_helper_page_flip, - .cursor_set = mdp4_crtc_cursor_set, - .cursor_move = mdp4_crtc_cursor_move, - .reset = drm_atomic_helper_crtc_reset, - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, -}; - -static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { - .mode_set_nofb = mdp4_crtc_mode_set_nofb, - .atomic_check = mdp4_crtc_atomic_check, - .atomic_begin = mdp4_crtc_atomic_begin, - .atomic_flush = mdp4_crtc_atomic_flush, - .atomic_enable = mdp4_crtc_atomic_enable, - .atomic_disable = mdp4_crtc_atomic_disable, -}; - -static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) -{ - struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); - struct drm_crtc *crtc = &mdp4_crtc->base; - struct msm_drm_private *priv = crtc->dev->dev_private; - unsigned pending; - - mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank); - - pending = atomic_xchg(&mdp4_crtc->pending, 0); - - if (pending & PENDING_FLIP) { - complete_flip(crtc, NULL); - } - - if (pending & PENDING_CURSOR) { - update_cursor(crtc); - drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq); - } -} - -static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) -{ - struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err); - struct drm_crtc *crtc = &mdp4_crtc->base; - DBG("%s: error: %08x", mdp4_crtc->name, irqstatus); - crtc_flush(crtc); -} - -static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - int ret; - - ret = drm_crtc_vblank_get(crtc); - if (ret) - return; - - ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, - !(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) & - mdp4_crtc->flushed_mask), - msecs_to_jiffies(50)); - if (ret <= 0) - dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id); - - mdp4_crtc->flushed_mask = 0; - - drm_crtc_vblank_put(crtc); -} - -uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - return mdp4_crtc->vblank.irqmask; -} - -/* set dma config, ie. the format the encoder wants. 
*/ -void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - - mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config); -} - -/* set interface for routing crtc->encoder: */ -void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - uint32_t intf_sel; - - intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL); - - switch (mdp4_crtc->dma) { - case DMA_P: - intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK; - intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf); - break; - case DMA_S: - intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK; - intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf); - break; - case DMA_E: - intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK; - intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf); - break; - } - - if (intf == INTF_DSI_VIDEO) { - intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD; - intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO; - } else if (intf == INTF_DSI_CMD) { - intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO; - intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD; - } - - mdp4_crtc->mixer = mixer; - - blend_setup(crtc); - - DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel); - - mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel); -} - -void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc) -{ - /* wait_for_flush_done is the only case for now. - * Later we will have command mode CRTC to wait for - * other event. - */ - mdp4_crtc_wait_for_flush_done(crtc); -} - -static const char *dma_names[] = { - "DMA_P", "DMA_S", "DMA_E", -}; - -/* initialize crtc */ -struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, - struct drm_plane *plane, int id, int ovlp_id, - enum mdp4_dma dma_id) -{ - struct drm_crtc *crtc = NULL; - struct mdp4_crtc *mdp4_crtc; - - mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL); - if (!mdp4_crtc) - return ERR_PTR(-ENOMEM); - - crtc = &mdp4_crtc->base; - - mdp4_crtc->id = id; - - mdp4_crtc->ovlp = ovlp_id; - mdp4_crtc->dma = dma_id; - - mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma); - mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq; - - mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma); - mdp4_crtc->err.irq = mdp4_crtc_err_irq; - - snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d", - dma_names[dma_id], ovlp_id); - - spin_lock_init(&mdp4_crtc->cursor.lock); - - drm_flip_work_init(&mdp4_crtc->unref_cursor_work, - "unref cursor", unref_cursor_worker); - - drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs, - NULL); - drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); - plane->crtc = crtc; - - return crtc; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c deleted file mode 100644 index 6a1ebdace391..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright (c) 2015, The Linux Foundation. All rights reserved. - * Copyright (c) 2014, Inforce Computing. All rights reserved. - * - * Author: Vinay Simha - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include -#include - -#include "mdp4_kms.h" - -struct mdp4_dsi_encoder { - struct drm_encoder base; - struct drm_panel *panel; - bool enabled; -}; -#define to_mdp4_dsi_encoder(x) container_of(x, struct mdp4_dsi_encoder, base) - -static struct mdp4_kms *get_kms(struct drm_encoder *encoder) -{ - struct msm_drm_private *priv = encoder->dev->dev_private; - return to_mdp4_kms(to_mdp_kms(priv->kms)); -} - -static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder) -{ - struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); - - drm_encoder_cleanup(encoder); - kfree(mdp4_dsi_encoder); -} - -static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = { - .destroy = mdp4_dsi_encoder_destroy, -}; - -static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct mdp4_kms *mdp4_kms = get_kms(encoder); - uint32_t dsi_hsync_skew, vsync_period, vsync_len, ctrl_pol; - uint32_t display_v_start, display_v_end; - uint32_t hsync_start_x, hsync_end_x; - - mode = adjusted_mode; - - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - mode->base.id, mode->name, - mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, - mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, - mode->vsync_end, mode->vtotal, - mode->type, mode->flags); - - ctrl_pol = 0; - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - ctrl_pol |= MDP4_DSI_CTRL_POLARITY_HSYNC_LOW; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - ctrl_pol |= MDP4_DSI_CTRL_POLARITY_VSYNC_LOW; - /* probably need to get DATA_EN polarity from panel.. */ - - dsi_hsync_skew = 0; /* get this from panel? 
*/ - - hsync_start_x = (mode->htotal - mode->hsync_start); - hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; - - vsync_period = mode->vtotal * mode->htotal; - vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; - display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dsi_hsync_skew; - display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dsi_hsync_skew - 1; - - mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_CTRL, - MDP4_DSI_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) | - MDP4_DSI_HSYNC_CTRL_PERIOD(mode->htotal)); - mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_PERIOD, vsync_period); - mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_LEN, vsync_len); - mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_HCTRL, - MDP4_DSI_DISPLAY_HCTRL_START(hsync_start_x) | - MDP4_DSI_DISPLAY_HCTRL_END(hsync_end_x)); - mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VSTART, display_v_start); - mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VEND, display_v_end); - - mdp4_write(mdp4_kms, REG_MDP4_DSI_CTRL_POLARITY, ctrl_pol); - mdp4_write(mdp4_kms, REG_MDP4_DSI_UNDERFLOW_CLR, - MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY | - MDP4_DSI_UNDERFLOW_CLR_COLOR(0xff)); - mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_HCTL, - MDP4_DSI_ACTIVE_HCTL_START(0) | - MDP4_DSI_ACTIVE_HCTL_END(0)); - mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_SKEW, dsi_hsync_skew); - mdp4_write(mdp4_kms, REG_MDP4_DSI_BORDER_CLR, 0); - mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VSTART, 0); - mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VEND, 0); -} - -static void mdp4_dsi_encoder_disable(struct drm_encoder *encoder) -{ - struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - - if (!mdp4_dsi_encoder->enabled) - return; - - mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. 
- */ - mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC); - - mdp4_dsi_encoder->enabled = false; -} - -static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder) -{ - struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - - if (mdp4_dsi_encoder->enabled) - return; - - mdp4_crtc_set_config(encoder->crtc, - MDP4_DMA_CONFIG_PACK_ALIGN_MSB | - MDP4_DMA_CONFIG_DEFLKR_EN | - MDP4_DMA_CONFIG_DITHER_EN | - MDP4_DMA_CONFIG_R_BPC(BPC8) | - MDP4_DMA_CONFIG_G_BPC(BPC8) | - MDP4_DMA_CONFIG_B_BPC(BPC8) | - MDP4_DMA_CONFIG_PACK(0x21)); - - mdp4_crtc_set_intf(encoder->crtc, INTF_DSI_VIDEO, 0); - - mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 1); - - mdp4_dsi_encoder->enabled = true; -} - -static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = { - .mode_set = mdp4_dsi_encoder_mode_set, - .disable = mdp4_dsi_encoder_disable, - .enable = mdp4_dsi_encoder_enable, -}; - -/* initialize encoder */ -struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev) -{ - struct drm_encoder *encoder = NULL; - struct mdp4_dsi_encoder *mdp4_dsi_encoder; - int ret; - - mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL); - if (!mdp4_dsi_encoder) { - ret = -ENOMEM; - goto fail; - } - - encoder = &mdp4_dsi_encoder->base; - - drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs, - DRM_MODE_ENCODER_DSI, NULL); - drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs); - - return encoder; - -fail: - if (encoder) - mdp4_dsi_encoder_destroy(encoder); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c deleted file mode 100644 index ba8e587f734b..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include -#include - -#include "mdp4_kms.h" - -struct mdp4_dtv_encoder { - struct drm_encoder base; - struct clk *hdmi_clk; - struct clk *mdp_clk; - unsigned long int pixclock; - bool enabled; - uint32_t bsc; -}; -#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base) - -static struct mdp4_kms *get_kms(struct drm_encoder *encoder) -{ - struct msm_drm_private *priv = encoder->dev->dev_private; - return to_mdp4_kms(to_mdp_kms(priv->kms)); -} - -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include -/* not ironically named at all.. no, really.. 
*/ -static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) -{ - struct drm_device *dev = mdp4_dtv_encoder->base.dev; - struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0"); - - if (!dtv_pdata) { - dev_err(dev->dev, "could not find dtv pdata\n"); - return; - } - - if (dtv_pdata->bus_scale_table) { - mdp4_dtv_encoder->bsc = msm_bus_scale_register_client( - dtv_pdata->bus_scale_table); - DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc); - DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save); - if (dtv_pdata->lcdc_power_save) - dtv_pdata->lcdc_power_save(1); - } -} - -static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) -{ - if (mdp4_dtv_encoder->bsc) { - msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc); - mdp4_dtv_encoder->bsc = 0; - } -} - -static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) -{ - if (mdp4_dtv_encoder->bsc) { - DBG("set bus scaling: %d", idx); - msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx); - } -} -#else -static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {} -static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {} -static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {} -#endif - -static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder) -{ - struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - bs_fini(mdp4_dtv_encoder); - drm_encoder_cleanup(encoder); - kfree(mdp4_dtv_encoder); -} - -static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = { - .destroy = mdp4_dtv_encoder_destroy, -}; - -static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; - uint32_t display_v_start, display_v_end; - uint32_t hsync_start_x, hsync_end_x; - - mode = adjusted_mode; - - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - mode->base.id, mode->name, - mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, - mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, - mode->vsync_end, mode->vtotal, - mode->type, mode->flags); - - mdp4_dtv_encoder->pixclock = mode->clock * 1000; - - DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock); - - ctrl_pol = 0; - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW; - /* probably need to get DATA_EN polarity from panel.. */ - - dtv_hsync_skew = 0; /* get this from panel? 
*/ - - hsync_start_x = (mode->htotal - mode->hsync_start); - hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; - - vsync_period = mode->vtotal * mode->htotal; - vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; - display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; - display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; - - mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL, - MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) | - MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal)); - mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period); - mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len); - mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL, - MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) | - MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x)); - mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start); - mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end); - mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0); - mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR, - MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY | - MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff)); - mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew); - mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol); - mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL, - MDP4_DTV_ACTIVE_HCTL_START(0) | - MDP4_DTV_ACTIVE_HCTL_END(0)); - mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0); - mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0); -} - -static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder) -{ - struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - - if (WARN_ON(!mdp4_dtv_encoder->enabled)) - return; - - mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. 
- */ - mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); - - clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); - clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); - - bs_set(mdp4_dtv_encoder, 0); - - mdp4_dtv_encoder->enabled = false; -} - -static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - unsigned long pc = mdp4_dtv_encoder->pixclock; - int ret; - - if (WARN_ON(mdp4_dtv_encoder->enabled)) - return; - - mdp4_crtc_set_config(encoder->crtc, - MDP4_DMA_CONFIG_R_BPC(BPC8) | - MDP4_DMA_CONFIG_G_BPC(BPC8) | - MDP4_DMA_CONFIG_B_BPC(BPC8) | - MDP4_DMA_CONFIG_PACK(0x21)); - mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1); - - bs_set(mdp4_dtv_encoder, 1); - - DBG("setting mdp_clk=%lu", pc); - - ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc); - if (ret) - dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n", - pc, ret); - - ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); - if (ret) - dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret); - - ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); - if (ret) - dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret); - - mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); - - mdp4_dtv_encoder->enabled = true; -} - -static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = { - .mode_set = mdp4_dtv_encoder_mode_set, - .enable = mdp4_dtv_encoder_enable, - .disable = mdp4_dtv_encoder_disable, -}; - -long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate) -{ - struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate); -} - -/* initialize encoder */ -struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) -{ - struct drm_encoder *encoder = NULL; - struct mdp4_dtv_encoder *mdp4_dtv_encoder; - int ret; - - mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL); - if (!mdp4_dtv_encoder) { - ret = -ENOMEM; - goto fail; - } - - encoder = &mdp4_dtv_encoder->base; - - drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); - - mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk"); - if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) { - dev_err(dev->dev, "failed to get hdmi_clk\n"); - ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk); - goto fail; - } - - mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk"); - if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) { - dev_err(dev->dev, "failed to get tv_clk\n"); - ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk); - goto fail; - } - - bs_init(mdp4_dtv_encoder); - - return encoder; - -fail: - if (encoder) - mdp4_dtv_encoder_destroy(encoder); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c deleted file mode 100644 index b764d7f10312..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include - -#include "msm_drv.h" -#include "mdp4_kms.h" - -void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, - uint32_t old_irqmask) -{ - mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR, - irqmask ^ (irqmask & old_irqmask)); - mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask); -} - -static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) -{ - struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler); - static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1); - extern bool dumpstate; - - DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus); - - if (dumpstate && __ratelimit(&rs)) { - struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev); - drm_state_dump(mdp4_kms->dev, &p); - } -} - -void mdp4_irq_preinstall(struct msm_kms *kms) -{ - struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - mdp4_enable(mdp4_kms); - mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); - mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); - mdp4_disable(mdp4_kms); -} - -int mdp4_irq_postinstall(struct msm_kms *kms) -{ - struct mdp_kms *mdp_kms = to_mdp_kms(kms); - struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms); - struct mdp_irq *error_handler = &mdp4_kms->error_handler; - - error_handler->irq = mdp4_irq_error_handler; - error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN | - MDP4_IRQ_EXTERNAL_INTF_UDERRUN; - - mdp_irq_register(mdp_kms, error_handler); - - return 0; -} - -void mdp4_irq_uninstall(struct msm_kms *kms) -{ - struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - mdp4_enable(mdp4_kms); - mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); - mdp4_disable(mdp4_kms); -} - -irqreturn_t mdp4_irq(struct msm_kms *kms) -{ - struct mdp_kms *mdp_kms = to_mdp_kms(kms); - struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms); - struct drm_device *dev = mdp4_kms->dev; - struct msm_drm_private *priv = dev->dev_private; - unsigned int id; - uint32_t status, enable; - - enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE); - status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable; - mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status); - - VERB("status=%08x", status); - - mdp_dispatch_irqs(mdp_kms, status); - - for (id = 0; id < priv->num_crtcs; id++) - if (status & mdp4_crtc_vblank(priv->crtcs[id])) - drm_handle_vblank(dev, id); - - return IRQ_HANDLED; -} - -int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) -{ - struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - - mdp4_enable(mdp4_kms); - mdp_update_vblank_mask(to_mdp_kms(kms), - mdp4_crtc_vblank(crtc), true); - mdp4_disable(mdp4_kms); - - return 0; -} - -void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) -{ - struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - - mdp4_enable(mdp4_kms); - mdp_update_vblank_mask(to_mdp_kms(kms), - mdp4_crtc_vblank(crtc), false); - mdp4_disable(mdp4_kms); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c deleted file mode 100644 index 4b646bf9c214..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ /dev/null @@ -1,572 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: 
Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - - -#include "msm_drv.h" -#include "msm_gem.h" -#include "msm_mmu.h" -#include "mdp4_kms.h" - -static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); - -static int mdp4_hw_init(struct msm_kms *kms) -{ - struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - struct drm_device *dev = mdp4_kms->dev; - uint32_t version, major, minor, dmap_cfg, vg_cfg; - unsigned long clk; - int ret = 0; - - pm_runtime_get_sync(dev->dev); - - mdp4_enable(mdp4_kms); - version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); - mdp4_disable(mdp4_kms); - - major = FIELD(version, MDP4_VERSION_MAJOR); - minor = FIELD(version, MDP4_VERSION_MINOR); - - DBG("found MDP4 version v%d.%d", major, minor); - - if (major != 4) { - dev_err(dev->dev, "unexpected MDP version: v%d.%d\n", - major, minor); - ret = -ENXIO; - goto out; - } - - mdp4_kms->rev = minor; - - if (mdp4_kms->rev > 1) { - mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff); - mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f); - } - - mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3); - - /* max read pending cmd config, 3 pending requests: */ - mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222); - - clk = clk_get_rate(mdp4_kms->clk); - - if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) { - dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */ - vg_cfg = 0x47; /* 16 bytes-burs x 8 req */ - } else { - dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */ - vg_cfg = 0x43; /* 16 bytes-burst x 4 req */ - } - - DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg); - - mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg); - mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg); - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg); - - if (mdp4_kms->rev >= 2) - mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1); - mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0); - - /* disable CSC matrix / YUV by default: */ - mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0); - mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0); - mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0); - mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0); - - if (mdp4_kms->rev > 1) - mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1); - - dev->mode_config.allow_fb_modifiers = true; - -out: - pm_runtime_put_sync(dev->dev); - - return ret; -} - -static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) -{ - struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - int i; - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; - - mdp4_enable(mdp4_kms); - - /* see 119ecb7fd */ - for_each_new_crtc_in_state(state, crtc, crtc_state, i) 
- drm_crtc_vblank_get(crtc); -} - -static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) -{ - struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - int i; - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; - - /* see 119ecb7fd */ - for_each_new_crtc_in_state(state, crtc, crtc_state, i) - drm_crtc_vblank_put(crtc); - - mdp4_disable(mdp4_kms); -} - -static void mdp4_wait_for_crtc_commit_done(struct msm_kms *kms, - struct drm_crtc *crtc) -{ - mdp4_crtc_wait_for_commit_done(crtc); -} - -static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, - struct drm_encoder *encoder) -{ - /* if we had >1 encoder, we'd need something more clever: */ - switch (encoder->encoder_type) { - case DRM_MODE_ENCODER_TMDS: - return mdp4_dtv_round_pixclk(encoder, rate); - case DRM_MODE_ENCODER_LVDS: - case DRM_MODE_ENCODER_DSI: - default: - return rate; - } -} - -static const char * const iommu_ports[] = { - "mdp_port0_cb0", "mdp_port1_cb0", -}; - -static void mdp4_destroy(struct msm_kms *kms) -{ - struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - struct device *dev = mdp4_kms->dev->dev; - struct msm_gem_address_space *aspace = kms->aspace; - - if (mdp4_kms->blank_cursor_iova) - msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace); - drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo); - - if (aspace) { - aspace->mmu->funcs->detach(aspace->mmu, - iommu_ports, ARRAY_SIZE(iommu_ports)); - msm_gem_address_space_put(aspace); - } - - if (mdp4_kms->rpm_enabled) - pm_runtime_disable(dev); - - kfree(mdp4_kms); -} - -static const struct mdp_kms_funcs kms_funcs = { - .base = { - .hw_init = mdp4_hw_init, - .irq_preinstall = mdp4_irq_preinstall, - .irq_postinstall = mdp4_irq_postinstall, - .irq_uninstall = mdp4_irq_uninstall, - .irq = mdp4_irq, - .enable_vblank = mdp4_enable_vblank, - .disable_vblank = mdp4_disable_vblank, - .prepare_commit = mdp4_prepare_commit, - .complete_commit = mdp4_complete_commit, - .wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done, - .get_format = mdp_get_format, - .round_pixclk = mdp4_round_pixclk, - .destroy = mdp4_destroy, - }, - .set_irqmask = mdp4_set_irqmask, -}; - -int mdp4_disable(struct mdp4_kms *mdp4_kms) -{ - DBG(""); - - clk_disable_unprepare(mdp4_kms->clk); - if (mdp4_kms->pclk) - clk_disable_unprepare(mdp4_kms->pclk); - clk_disable_unprepare(mdp4_kms->lut_clk); - if (mdp4_kms->axi_clk) - clk_disable_unprepare(mdp4_kms->axi_clk); - - return 0; -} - -int mdp4_enable(struct mdp4_kms *mdp4_kms) -{ - DBG(""); - - clk_prepare_enable(mdp4_kms->clk); - if (mdp4_kms->pclk) - clk_prepare_enable(mdp4_kms->pclk); - clk_prepare_enable(mdp4_kms->lut_clk); - if (mdp4_kms->axi_clk) - clk_prepare_enable(mdp4_kms->axi_clk); - - return 0; -} - - -static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, - int intf_type) -{ - struct drm_device *dev = mdp4_kms->dev; - struct msm_drm_private *priv = dev->dev_private; - struct drm_encoder *encoder; - struct drm_connector *connector; - struct device_node *panel_node; - int dsi_id; - int ret; - - switch (intf_type) { - case DRM_MODE_ENCODER_LVDS: - /* - * bail out early if there is no panel node (no need to - * initialize LCDC encoder and LVDS connector) - */ - panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0); - if (!panel_node) - return 0; - - encoder = mdp4_lcdc_encoder_init(dev, panel_node); - if (IS_ERR(encoder)) { - dev_err(dev->dev, "failed to construct LCDC encoder\n"); - return PTR_ERR(encoder); - } - - /* LCDC can be hooked to DMA_P (TODO: Add DMA_S 
later?) */ - encoder->possible_crtcs = 1 << DMA_P; - - connector = mdp4_lvds_connector_init(dev, panel_node, encoder); - if (IS_ERR(connector)) { - dev_err(dev->dev, "failed to initialize LVDS connector\n"); - return PTR_ERR(connector); - } - - priv->encoders[priv->num_encoders++] = encoder; - priv->connectors[priv->num_connectors++] = connector; - - break; - case DRM_MODE_ENCODER_TMDS: - encoder = mdp4_dtv_encoder_init(dev); - if (IS_ERR(encoder)) { - dev_err(dev->dev, "failed to construct DTV encoder\n"); - return PTR_ERR(encoder); - } - - /* DTV can be hooked to DMA_E: */ - encoder->possible_crtcs = 1 << 1; - - if (priv->hdmi) { - /* Construct bridge/connector for HDMI: */ - ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); - if (ret) { - dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); - return ret; - } - } - - priv->encoders[priv->num_encoders++] = encoder; - - break; - case DRM_MODE_ENCODER_DSI: - /* only DSI1 supported for now */ - dsi_id = 0; - - if (!priv->dsi[dsi_id]) - break; - - encoder = mdp4_dsi_encoder_init(dev); - if (IS_ERR(encoder)) { - ret = PTR_ERR(encoder); - dev_err(dev->dev, - "failed to construct DSI encoder: %d\n", ret); - return ret; - } - - /* TODO: Add DMA_S later? */ - encoder->possible_crtcs = 1 << DMA_P; - priv->encoders[priv->num_encoders++] = encoder; - - ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); - if (ret) { - dev_err(dev->dev, "failed to initialize DSI: %d\n", - ret); - return ret; - } - - break; - default: - dev_err(dev->dev, "Invalid or unsupported interface\n"); - return -EINVAL; - } - - return 0; -} - -static int modeset_init(struct mdp4_kms *mdp4_kms) -{ - struct drm_device *dev = mdp4_kms->dev; - struct msm_drm_private *priv = dev->dev_private; - struct drm_plane *plane; - struct drm_crtc *crtc; - int i, ret; - static const enum mdp4_pipe rgb_planes[] = { - RGB1, RGB2, - }; - static const enum mdp4_pipe vg_planes[] = { - VG1, VG2, - }; - static const enum mdp4_dma mdp4_crtcs[] = { - DMA_P, DMA_E, - }; - static const char * const mdp4_crtc_names[] = { - "DMA_P", "DMA_E", - }; - static const int mdp4_intfs[] = { - DRM_MODE_ENCODER_LVDS, - DRM_MODE_ENCODER_DSI, - DRM_MODE_ENCODER_TMDS, - }; - - /* construct non-private planes: */ - for (i = 0; i < ARRAY_SIZE(vg_planes); i++) { - plane = mdp4_plane_init(dev, vg_planes[i], false); - if (IS_ERR(plane)) { - dev_err(dev->dev, - "failed to construct plane for VG%d\n", i + 1); - ret = PTR_ERR(plane); - goto fail; - } - priv->planes[priv->num_planes++] = plane; - } - - for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) { - plane = mdp4_plane_init(dev, rgb_planes[i], true); - if (IS_ERR(plane)) { - dev_err(dev->dev, - "failed to construct plane for RGB%d\n", i + 1); - ret = PTR_ERR(plane); - goto fail; - } - - crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i, - mdp4_crtcs[i]); - if (IS_ERR(crtc)) { - dev_err(dev->dev, "failed to construct crtc for %s\n", - mdp4_crtc_names[i]); - ret = PTR_ERR(crtc); - goto fail; - } - - priv->crtcs[priv->num_crtcs++] = crtc; - } - - /* - * we currently set up two relatively fixed paths: - * - * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS - * or - * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel - * - * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI - */ - - for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) { - ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]); - if (ret) { - dev_err(dev->dev, "failed to initialize intf: %d, %d\n", - i, ret); - goto fail; - } - } - - return 0; - -fail: - return ret; -} - -struct msm_kms *mdp4_kms_init(struct 
drm_device *dev) -{ - struct platform_device *pdev = to_platform_device(dev->dev); - struct mdp4_platform_config *config = mdp4_get_config(pdev); - struct mdp4_kms *mdp4_kms; - struct msm_kms *kms = NULL; - struct msm_gem_address_space *aspace; - int irq, ret; - - mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); - if (!mdp4_kms) { - dev_err(dev->dev, "failed to allocate kms\n"); - ret = -ENOMEM; - goto fail; - } - - mdp_kms_init(&mdp4_kms->base, &kms_funcs); - - kms = &mdp4_kms->base.base; - - mdp4_kms->dev = dev; - - mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4"); - if (IS_ERR(mdp4_kms->mmio)) { - ret = PTR_ERR(mdp4_kms->mmio); - goto fail; - } - - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - ret = irq; - dev_err(dev->dev, "failed to get irq: %d\n", ret); - goto fail; - } - - kms->irq = irq; - - /* NOTE: driver for this regulator still missing upstream.. use - * _get_exclusive() and ignore the error if it does not exist - * (and hope that the bootloader left it on for us) - */ - mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd"); - if (IS_ERR(mdp4_kms->vdd)) - mdp4_kms->vdd = NULL; - - if (mdp4_kms->vdd) { - ret = regulator_enable(mdp4_kms->vdd); - if (ret) { - dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret); - goto fail; - } - } - - mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk"); - if (IS_ERR(mdp4_kms->clk)) { - dev_err(dev->dev, "failed to get core_clk\n"); - ret = PTR_ERR(mdp4_kms->clk); - goto fail; - } - - mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk"); - if (IS_ERR(mdp4_kms->pclk)) - mdp4_kms->pclk = NULL; - - // XXX if (rev >= MDP_REV_42) { ??? - mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk"); - if (IS_ERR(mdp4_kms->lut_clk)) { - dev_err(dev->dev, "failed to get lut_clk\n"); - ret = PTR_ERR(mdp4_kms->lut_clk); - goto fail; - } - - mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk"); - if (IS_ERR(mdp4_kms->axi_clk)) { - dev_err(dev->dev, "failed to get axi_clk\n"); - ret = PTR_ERR(mdp4_kms->axi_clk); - goto fail; - } - - clk_set_rate(mdp4_kms->clk, config->max_clk); - clk_set_rate(mdp4_kms->lut_clk, config->max_clk); - - pm_runtime_enable(dev->dev); - mdp4_kms->rpm_enabled = true; - - /* make sure things are off before attaching iommu (bootloader could - * have left things on, in which case we'll start getting faults if - * we don't disable): - */ - mdp4_enable(mdp4_kms); - mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); - mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); - mdp4_disable(mdp4_kms); - mdelay(16); - - if (config->iommu) { - aspace = msm_gem_address_space_create(&pdev->dev, - config->iommu, "mdp4"); - if (IS_ERR(aspace)) { - ret = PTR_ERR(aspace); - goto fail; - } - - kms->aspace = aspace; - - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); - if (ret) - goto fail; - } else { - dev_info(dev->dev, "no iommu, fallback to phys " - "contig buffers for scanout\n"); - aspace = NULL; - } - - ret = modeset_init(mdp4_kms); - if (ret) { - dev_err(dev->dev, "modeset_init failed: %d\n", ret); - goto fail; - } - - mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC); - if (IS_ERR(mdp4_kms->blank_cursor_bo)) { - ret = PTR_ERR(mdp4_kms->blank_cursor_bo); - dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); - mdp4_kms->blank_cursor_bo = NULL; - goto fail; - } - - ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace, - &mdp4_kms->blank_cursor_iova); - if (ret) { - dev_err(dev->dev, "could not pin 
blank-cursor bo: %d\n", ret); - goto fail; - } - - dev->mode_config.min_width = 0; - dev->mode_config.min_height = 0; - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; - - return kms; - -fail: - if (kms) - mdp4_destroy(kms); - return ERR_PTR(ret); -} - -static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev) -{ - static struct mdp4_platform_config config = {}; - - /* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */ - config.max_clk = 266667000; - config.iommu = iommu_domain_alloc(&platform_bus_type); - if (config.iommu) { - config.iommu->geometry.aperture_start = 0x1000; - config.iommu->geometry.aperture_end = 0xffffffff; - } - - return &config; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h deleted file mode 100644 index a1b3e31e959e..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#ifndef __MDP4_KMS_H__ -#define __MDP4_KMS_H__ - -#include - -#include "msm_drv.h" -#include "msm_kms.h" -#include "mdp/mdp_kms.h" -#include "mdp4.xml.h" - -struct device_node; - -struct mdp4_kms { - struct mdp_kms base; - - struct drm_device *dev; - - int rev; - - void __iomem *mmio; - - struct regulator *vdd; - - struct clk *clk; - struct clk *pclk; - struct clk *lut_clk; - struct clk *axi_clk; - - struct mdp_irq error_handler; - - bool rpm_enabled; - - /* empty/blank cursor bo to use when cursor is "disabled" */ - struct drm_gem_object *blank_cursor_bo; - uint64_t blank_cursor_iova; -}; -#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) - -/* platform config data (ie. from DT, or pdata) */ -struct mdp4_platform_config { - struct iommu_domain *iommu; - uint32_t max_clk; -}; - -static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data) -{ - msm_writel(data, mdp4_kms->mmio + reg); -} - -static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg) -{ - return msm_readl(mdp4_kms->mmio + reg); -} - -static inline uint32_t pipe2flush(enum mdp4_pipe pipe) -{ - switch (pipe) { - case VG1: return MDP4_OVERLAY_FLUSH_VG1; - case VG2: return MDP4_OVERLAY_FLUSH_VG2; - case RGB1: return MDP4_OVERLAY_FLUSH_RGB1; - case RGB2: return MDP4_OVERLAY_FLUSH_RGB2; - default: return 0; - } -} - -static inline uint32_t ovlp2flush(int ovlp) -{ - switch (ovlp) { - case 0: return MDP4_OVERLAY_FLUSH_OVLP0; - case 1: return MDP4_OVERLAY_FLUSH_OVLP1; - default: return 0; - } -} - -static inline uint32_t dma2irq(enum mdp4_dma dma) -{ - switch (dma) { - case DMA_P: return MDP4_IRQ_DMA_P_DONE; - case DMA_S: return MDP4_IRQ_DMA_S_DONE; - case DMA_E: return MDP4_IRQ_DMA_E_DONE; - default: return 0; - } -} - -static inline uint32_t dma2err(enum mdp4_dma dma) -{ - switch (dma) { - case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN; - case DMA_S: return 0; // ??? 
- case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN; - default: return 0; - } -} - -static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer, - enum mdp4_pipe pipe, enum mdp_mixer_stage_id stage) -{ - switch (pipe) { - case VG1: - mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK | - MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1); - mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) | - COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1); - break; - case VG2: - mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK | - MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1); - mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) | - COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1); - break; - case RGB1: - mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK | - MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1); - mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) | - COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1); - break; - case RGB2: - mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK | - MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1); - mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) | - COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1); - break; - case RGB3: - mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK | - MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1); - mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) | - COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1); - break; - case VG3: - mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK | - MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1); - mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) | - COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1); - break; - case VG4: - mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK | - MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1); - mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) | - COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1); - break; - default: - WARN(1, "invalid pipe"); - break; - } - - return mixer_cfg; -} - -int mdp4_disable(struct mdp4_kms *mdp4_kms); -int mdp4_enable(struct mdp4_kms *mdp4_kms); - -void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, - uint32_t old_irqmask); -void mdp4_irq_preinstall(struct msm_kms *kms); -int mdp4_irq_postinstall(struct msm_kms *kms); -void mdp4_irq_uninstall(struct msm_kms *kms); -irqreturn_t mdp4_irq(struct msm_kms *kms); -int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); -void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); - -static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe) -{ - switch (pipe) { - case VG1: - case VG2: - case VG3: - case VG4: - return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC; - case RGB1: - case RGB2: - case RGB3: - return MDP_PIPE_CAP_SCALE; - default: - return 0; - } -} - -enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane); -struct drm_plane *mdp4_plane_init(struct drm_device *dev, - enum mdp4_pipe pipe_id, bool private_plane); - -uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc); -void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config); -void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer); -void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc); -struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, - struct drm_plane *plane, int id, int ovlp_id, - enum mdp4_dma dma_id); - -long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate); -struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev); - -long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate); -struct drm_encoder *mdp4_lcdc_encoder_init(struct 
drm_device *dev, - struct device_node *panel_node); - -struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, - struct device_node *panel_node, struct drm_encoder *encoder); - -#ifdef CONFIG_DRM_MSM_DSI -struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev); -#else -static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev) -{ - return ERR_PTR(-ENODEV); -} -#endif - -#ifdef CONFIG_COMMON_CLK -struct clk *mpd4_lvds_pll_init(struct drm_device *dev); -#else -static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev) -{ - return ERR_PTR(-ENODEV); -} -#endif - -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -/* bus scaling data is associated with extra pointless platform devices, - * "dtv", etc.. this is a bit of a hack, but we need a way for encoders - * to find their pdata to make the bus-scaling stuff work. - */ -static inline void *mdp4_find_pdata(const char *devname) -{ - struct device *dev; - dev = bus_find_device_by_name(&platform_bus_type, NULL, devname); - return dev ? dev->platform_data : NULL; -} -#endif - -#endif /* __MDP4_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c deleted file mode 100644 index 4a645926edb7..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Copyright (C) 2014 Red Hat - * Author: Rob Clark - * Author: Vinay Simha - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . 
- */ - -#include -#include - -#include "mdp4_kms.h" - -struct mdp4_lcdc_encoder { - struct drm_encoder base; - struct device_node *panel_node; - struct drm_panel *panel; - struct clk *lcdc_clk; - unsigned long int pixclock; - struct regulator *regs[3]; - bool enabled; - uint32_t bsc; -}; -#define to_mdp4_lcdc_encoder(x) container_of(x, struct mdp4_lcdc_encoder, base) - -static struct mdp4_kms *get_kms(struct drm_encoder *encoder) -{ - struct msm_drm_private *priv = encoder->dev->dev_private; - return to_mdp4_kms(to_mdp_kms(priv->kms)); -} - -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include -static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) -{ - struct drm_device *dev = mdp4_lcdc_encoder->base.dev; - struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0"); - - if (!lcdc_pdata) { - dev_err(dev->dev, "could not find lvds pdata\n"); - return; - } - - if (lcdc_pdata->bus_scale_table) { - mdp4_lcdc_encoder->bsc = msm_bus_scale_register_client( - lcdc_pdata->bus_scale_table); - DBG("lvds : bus scale client: %08x", mdp4_lcdc_encoder->bsc); - } -} - -static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) -{ - if (mdp4_lcdc_encoder->bsc) { - msm_bus_scale_unregister_client(mdp4_lcdc_encoder->bsc); - mdp4_lcdc_encoder->bsc = 0; - } -} - -static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) -{ - if (mdp4_lcdc_encoder->bsc) { - DBG("set bus scaling: %d", idx); - msm_bus_scale_client_update_request(mdp4_lcdc_encoder->bsc, idx); - } -} -#else -static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {} -static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {} -static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) {} -#endif - -static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder) -{ - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = - to_mdp4_lcdc_encoder(encoder); - bs_fini(mdp4_lcdc_encoder); - drm_encoder_cleanup(encoder); - kfree(mdp4_lcdc_encoder); -} - -static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = { - .destroy = mdp4_lcdc_encoder_destroy, -}; - -/* this should probably be a helper: */ -static struct drm_connector *get_connector(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct drm_connector *connector; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - if (connector->encoder == encoder) - return connector; - - return NULL; -} - -static void setup_phy(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct drm_connector *connector = get_connector(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - uint32_t lvds_intf = 0, lvds_phy_cfg0 = 0; - int bpp, nchan, swap; - - if (!connector) - return; - - bpp = 3 * connector->display_info.bpc; - - if (!bpp) - bpp = 18; - - /* TODO, these should come from panel somehow: */ - nchan = 1; - swap = 0; - - switch (bpp) { - case 24: - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0), - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x08) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x05) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x04) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x03)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0), - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x02) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x01) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x00)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1), - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x11) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x10) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0d) | 
- MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0c)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1), - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0b) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0a) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x09)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2), - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x15)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2), - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x14) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x13) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x12)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(3), - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1b) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x17) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x16) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0f)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(3), - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0e) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x07) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x06)); - if (nchan == 2) { - lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; - } else { - lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; - } - break; - - case 18: - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0), - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x0a) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x07) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x06) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x05)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0), - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x04) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x03) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x02)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1), - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x13) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x12) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0f) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0e)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1), - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0d) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0c) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x0b)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2), - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) | - MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x17)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2), - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x16) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x15) | - MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x14)); - if (nchan == 2) { - lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; - } else { - lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; - } - lvds_intf |= 
MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT; - break; - - default: - dev_err(dev->dev, "unknown bpp: %d\n", bpp); - return; - } - - switch (nchan) { - case 1: - lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0; - lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN | - MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL; - break; - case 2: - lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0 | - MDP4_LVDS_PHY_CFG0_CHANNEL1; - lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN | - MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN; - break; - default: - dev_err(dev->dev, "unknown # of channels: %d\n", nchan); - return; - } - - if (swap) - lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP; - - lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_ENABLE; - - mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_INTF_CTL, lvds_intf); - mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG2, 0x30); - - mb(); - udelay(1); - lvds_phy_cfg0 |= MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE; - mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0); -} - -static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = - to_mdp4_lcdc_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - uint32_t lcdc_hsync_skew, vsync_period, vsync_len, ctrl_pol; - uint32_t display_v_start, display_v_end; - uint32_t hsync_start_x, hsync_end_x; - - mode = adjusted_mode; - - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - mode->base.id, mode->name, - mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, - mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, - mode->vsync_end, mode->vtotal, - mode->type, mode->flags); - - mdp4_lcdc_encoder->pixclock = mode->clock * 1000; - - DBG("pixclock=%lu", mdp4_lcdc_encoder->pixclock); - - ctrl_pol = 0; - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW; - /* probably need to get DATA_EN polarity from panel.. */ - - lcdc_hsync_skew = 0; /* get this from panel? 
*/ - - hsync_start_x = (mode->htotal - mode->hsync_start); - hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; - - vsync_period = mode->vtotal * mode->htotal; - vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; - display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + lcdc_hsync_skew; - display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + lcdc_hsync_skew - 1; - - mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_CTRL, - MDP4_LCDC_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) | - MDP4_LCDC_HSYNC_CTRL_PERIOD(mode->htotal)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_PERIOD, vsync_period); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_LEN, vsync_len); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_HCTRL, - MDP4_LCDC_DISPLAY_HCTRL_START(hsync_start_x) | - MDP4_LCDC_DISPLAY_HCTRL_END(hsync_end_x)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VSTART, display_v_start); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VEND, display_v_end); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_BORDER_CLR, 0); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_UNDERFLOW_CLR, - MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY | - MDP4_LCDC_UNDERFLOW_CLR_COLOR(0xff)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_SKEW, lcdc_hsync_skew); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_CTRL_POLARITY, ctrl_pol); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_HCTL, - MDP4_LCDC_ACTIVE_HCTL_START(0) | - MDP4_LCDC_ACTIVE_HCTL_END(0)); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VSTART, 0); - mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VEND, 0); -} - -static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = - to_mdp4_lcdc_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - struct drm_panel *panel; - int i, ret; - - if (WARN_ON(!mdp4_lcdc_encoder->enabled)) - return; - - mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); - - panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node); - if (panel) { - drm_panel_disable(panel); - drm_panel_unprepare(panel); - } - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. 
- */ - mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC); - - clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk); - - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { - ret = regulator_disable(mdp4_lcdc_encoder->regs[i]); - if (ret) - dev_err(dev->dev, "failed to disable regulator: %d\n", ret); - } - - bs_set(mdp4_lcdc_encoder, 0); - - mdp4_lcdc_encoder->enabled = false; -} - -static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = - to_mdp4_lcdc_encoder(encoder); - unsigned long pc = mdp4_lcdc_encoder->pixclock; - struct mdp4_kms *mdp4_kms = get_kms(encoder); - struct drm_panel *panel; - int i, ret; - - if (WARN_ON(mdp4_lcdc_encoder->enabled)) - return; - - /* TODO: hard-coded for 18bpp: */ - mdp4_crtc_set_config(encoder->crtc, - MDP4_DMA_CONFIG_R_BPC(BPC6) | - MDP4_DMA_CONFIG_G_BPC(BPC6) | - MDP4_DMA_CONFIG_B_BPC(BPC6) | - MDP4_DMA_CONFIG_PACK_ALIGN_MSB | - MDP4_DMA_CONFIG_PACK(0x21) | - MDP4_DMA_CONFIG_DEFLKR_EN | - MDP4_DMA_CONFIG_DITHER_EN); - mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0); - - bs_set(mdp4_lcdc_encoder, 1); - - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { - ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); - if (ret) - dev_err(dev->dev, "failed to enable regulator: %d\n", ret); - } - - DBG("setting lcdc_clk=%lu", pc); - ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc); - if (ret) - dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret); - ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk); - if (ret) - dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret); - - panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node); - if (panel) { - drm_panel_prepare(panel); - drm_panel_enable(panel); - } - - setup_phy(encoder); - - mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1); - - mdp4_lcdc_encoder->enabled = true; -} - -static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = { - .mode_set = mdp4_lcdc_encoder_mode_set, - .disable = mdp4_lcdc_encoder_disable, - .enable = mdp4_lcdc_encoder_enable, -}; - -long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate) -{ - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = - to_mdp4_lcdc_encoder(encoder); - return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate); -} - -/* initialize encoder */ -struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, - struct device_node *panel_node) -{ - struct drm_encoder *encoder = NULL; - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder; - struct regulator *reg; - int ret; - - mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL); - if (!mdp4_lcdc_encoder) { - ret = -ENOMEM; - goto fail; - } - - mdp4_lcdc_encoder->panel_node = panel_node; - - encoder = &mdp4_lcdc_encoder->base; - - drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); - drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs); - - /* TODO: do we need different pll in other cases? */ - mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev); - if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) { - dev_err(dev->dev, "failed to get lvds_clk\n"); - ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk); - goto fail; - } - - /* TODO: different regulators in other cases? 
*/ - reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v"); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret); - goto fail; - } - mdp4_lcdc_encoder->regs[0] = reg; - - reg = devm_regulator_get(dev->dev, "lvds-pll-vdda"); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret); - goto fail; - } - mdp4_lcdc_encoder->regs[1] = reg; - - reg = devm_regulator_get(dev->dev, "lvds-vdda"); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret); - goto fail; - } - mdp4_lcdc_encoder->regs[2] = reg; - - bs_init(mdp4_lcdc_encoder); - - return encoder; - -fail: - if (encoder) - mdp4_lcdc_encoder_destroy(encoder); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c deleted file mode 100644 index e3b1c86b7aae..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (C) 2014 Red Hat - * Author: Rob Clark - * Author: Vinay Simha - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include - -#include "mdp4_kms.h" - -struct mdp4_lvds_connector { - struct drm_connector base; - struct drm_encoder *encoder; - struct device_node *panel_node; - struct drm_panel *panel; -}; -#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base) - -static enum drm_connector_status mdp4_lvds_connector_detect( - struct drm_connector *connector, bool force) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - - if (!mdp4_lvds_connector->panel) - mdp4_lvds_connector->panel = - of_drm_find_panel(mdp4_lvds_connector->panel_node); - - return mdp4_lvds_connector->panel ? 
- connector_status_connected : - connector_status_disconnected; -} - -static void mdp4_lvds_connector_destroy(struct drm_connector *connector) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - - drm_connector_cleanup(connector); - - kfree(mdp4_lvds_connector); -} - -static int mdp4_lvds_connector_get_modes(struct drm_connector *connector) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - struct drm_panel *panel = mdp4_lvds_connector->panel; - int ret = 0; - - if (panel) { - drm_panel_attach(panel, connector); - - ret = panel->funcs->get_modes(panel); - - drm_panel_detach(panel); - } - - return ret; -} - -static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - struct drm_encoder *encoder = mdp4_lvds_connector->encoder; - long actual, requested; - - requested = 1000 * mode->clock; - actual = mdp4_lcdc_round_pixclk(encoder, requested); - - DBG("requested=%ld, actual=%ld", requested, actual); - - if (actual != requested) - return MODE_CLOCK_RANGE; - - return MODE_OK; -} - -static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { - .detect = mdp4_lvds_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = mdp4_lvds_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = { - .get_modes = mdp4_lvds_connector_get_modes, - .mode_valid = mdp4_lvds_connector_mode_valid, -}; - -/* initialize connector */ -struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, - struct device_node *panel_node, struct drm_encoder *encoder) -{ - struct drm_connector *connector = NULL; - struct mdp4_lvds_connector *mdp4_lvds_connector; - - mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL); - if (!mdp4_lvds_connector) - return ERR_PTR(-ENOMEM); - - mdp4_lvds_connector->encoder = encoder; - mdp4_lvds_connector->panel_node = panel_node; - - connector = &mdp4_lvds_connector->base; - - drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs, - DRM_MODE_CONNECTOR_LVDS); - drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs); - - connector->polled = 0; - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - drm_mode_connector_attach_encoder(connector, encoder); - - return connector; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c deleted file mode 100644 index ce4245971673..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (C) 2014 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . 
- */ - -#include -#include - -#include "mdp4_kms.h" - -struct mdp4_lvds_pll { - struct clk_hw pll_hw; - struct drm_device *dev; - unsigned long pixclk; -}; -#define to_mdp4_lvds_pll(x) container_of(x, struct mdp4_lvds_pll, pll_hw) - -static struct mdp4_kms *get_kms(struct mdp4_lvds_pll *lvds_pll) -{ - struct msm_drm_private *priv = lvds_pll->dev->dev_private; - return to_mdp4_kms(to_mdp_kms(priv->kms)); -} - -struct pll_rate { - unsigned long rate; - struct { - uint32_t val; - uint32_t reg; - } conf[32]; -}; - -/* NOTE: keep sorted highest freq to lowest: */ -static const struct pll_rate freqtbl[] = { - { 72000000, { - { 0x8f, REG_MDP4_LVDS_PHY_PLL_CTRL_1 }, - { 0x30, REG_MDP4_LVDS_PHY_PLL_CTRL_2 }, - { 0xc6, REG_MDP4_LVDS_PHY_PLL_CTRL_3 }, - { 0x10, REG_MDP4_LVDS_PHY_PLL_CTRL_5 }, - { 0x07, REG_MDP4_LVDS_PHY_PLL_CTRL_6 }, - { 0x62, REG_MDP4_LVDS_PHY_PLL_CTRL_7 }, - { 0x41, REG_MDP4_LVDS_PHY_PLL_CTRL_8 }, - { 0x0d, REG_MDP4_LVDS_PHY_PLL_CTRL_9 }, - { 0, 0 } } - }, -}; - -static const struct pll_rate *find_rate(unsigned long rate) -{ - int i; - for (i = 1; i < ARRAY_SIZE(freqtbl); i++) - if (rate > freqtbl[i].rate) - return &freqtbl[i-1]; - return &freqtbl[i-1]; -} - -static int mpd4_lvds_pll_enable(struct clk_hw *hw) -{ - struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); - struct mdp4_kms *mdp4_kms = get_kms(lvds_pll); - const struct pll_rate *pll_rate = find_rate(lvds_pll->pixclk); - int i; - - DBG("pixclk=%lu (%lu)", lvds_pll->pixclk, pll_rate->rate); - - if (WARN_ON(!pll_rate)) - return -EINVAL; - - mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_PHY_RESET, 0x33); - - for (i = 0; pll_rate->conf[i].reg; i++) - mdp4_write(mdp4_kms, pll_rate->conf[i].reg, pll_rate->conf[i].val); - - mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x01); - - /* Wait until LVDS PLL is locked and ready */ - while (!mdp4_read(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_LOCKED)) - cpu_relax(); - - return 0; -} - -static void mpd4_lvds_pll_disable(struct clk_hw *hw) -{ - struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); - struct mdp4_kms *mdp4_kms = get_kms(lvds_pll); - - DBG(""); - - mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, 0x0); - mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x0); -} - -static unsigned long mpd4_lvds_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); - return lvds_pll->pixclk; -} - -static long mpd4_lvds_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ - const struct pll_rate *pll_rate = find_rate(rate); - return pll_rate->rate; -} - -static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); - lvds_pll->pixclk = rate; - return 0; -} - - -static const struct clk_ops mpd4_lvds_pll_ops = { - .enable = mpd4_lvds_pll_enable, - .disable = mpd4_lvds_pll_disable, - .recalc_rate = mpd4_lvds_pll_recalc_rate, - .round_rate = mpd4_lvds_pll_round_rate, - .set_rate = mpd4_lvds_pll_set_rate, -}; - -static const char *mpd4_lvds_pll_parents[] = { - "pxo", -}; - -static struct clk_init_data pll_init = { - .name = "mpd4_lvds_pll", - .ops = &mpd4_lvds_pll_ops, - .parent_names = mpd4_lvds_pll_parents, - .num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents), -}; - -struct clk *mpd4_lvds_pll_init(struct drm_device *dev) -{ - struct mdp4_lvds_pll *lvds_pll; - struct clk *clk; - int ret; - - lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL); - if (!lvds_pll) { - ret = -ENOMEM; - goto fail; - } 
- - lvds_pll->dev = dev; - - lvds_pll->pll_hw.init = &pll_init; - clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - goto fail; - } - - return clk; - -fail: - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c deleted file mode 100644 index 7a1ad3af08e3..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include "mdp4_kms.h" - -#define DOWN_SCALE_MAX 8 -#define UP_SCALE_MAX 8 - -struct mdp4_plane { - struct drm_plane base; - const char *name; - - enum mdp4_pipe pipe; - - uint32_t caps; - uint32_t nformats; - uint32_t formats[32]; - - bool enabled; -}; -#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base) - -/* MDP format helper functions */ -static inline -enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb) -{ - bool is_tile = false; - - if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE) - is_tile = true; - - if (fb->format->format == DRM_FORMAT_NV12 && is_tile) - return FRAME_TILE_YCBCR_420; - - return FRAME_LINEAR; -} - -static void mdp4_plane_set_scanout(struct drm_plane *plane, - struct drm_framebuffer *fb); -static int mdp4_plane_mode_set(struct drm_plane *plane, - struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h); - -static struct mdp4_kms *get_kms(struct drm_plane *plane) -{ - struct msm_drm_private *priv = plane->dev->dev_private; - return to_mdp4_kms(to_mdp_kms(priv->kms)); -} - -static void mdp4_plane_destroy(struct drm_plane *plane) -{ - struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); - - drm_plane_helper_disable(plane); - drm_plane_cleanup(plane); - - kfree(mdp4_plane); -} - -/* helper to install properties which are common to planes and crtcs */ -static void mdp4_plane_install_properties(struct drm_plane *plane, - struct drm_mode_object *obj) -{ - // XXX -} - -static int mdp4_plane_set_property(struct drm_plane *plane, - struct drm_property *property, uint64_t val) -{ - // XXX - return -EINVAL; -} - -static const struct drm_plane_funcs mdp4_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = mdp4_plane_destroy, - .set_property = mdp4_plane_set_property, - .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, -}; - -static int mdp4_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); - struct mdp4_kms *mdp4_kms = get_kms(plane); - struct msm_kms *kms = &mdp4_kms->base.base; - struct drm_framebuffer *fb = new_state->fb; - - if (!fb) - return 0; 
- - DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id); - return msm_framebuffer_prepare(fb, kms->aspace); -} - -static void mdp4_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); - struct mdp4_kms *mdp4_kms = get_kms(plane); - struct msm_kms *kms = &mdp4_kms->base.base; - struct drm_framebuffer *fb = old_state->fb; - - if (!fb) - return; - - DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id); - msm_framebuffer_cleanup(fb, kms->aspace); -} - - -static int mdp4_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) -{ - return 0; -} - -static void mdp4_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct drm_plane_state *state = plane->state; - int ret; - - ret = mdp4_plane_mode_set(plane, - state->crtc, state->fb, - state->crtc_x, state->crtc_y, - state->crtc_w, state->crtc_h, - state->src_x, state->src_y, - state->src_w, state->src_h); - /* atomic_check should have ensured that this doesn't fail */ - WARN_ON(ret < 0); -} - -static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = { - .prepare_fb = mdp4_plane_prepare_fb, - .cleanup_fb = mdp4_plane_cleanup_fb, - .atomic_check = mdp4_plane_atomic_check, - .atomic_update = mdp4_plane_atomic_update, -}; - -static void mdp4_plane_set_scanout(struct drm_plane *plane, - struct drm_framebuffer *fb) -{ - struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); - struct mdp4_kms *mdp4_kms = get_kms(plane); - struct msm_kms *kms = &mdp4_kms->base.base; - enum mdp4_pipe pipe = mdp4_plane->pipe; - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), - MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | - MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe), - MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) | - MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), - msm_framebuffer_iova(fb, kms->aspace, 0)); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe), - msm_framebuffer_iova(fb, kms->aspace, 1)); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe), - msm_framebuffer_iova(fb, kms->aspace, 2)); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe), - msm_framebuffer_iova(fb, kms->aspace, 3)); - - plane->fb = fb; -} - -static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms, - enum mdp4_pipe pipe, struct csc_cfg *csc) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(csc->matrix); i++) { - mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_MV(pipe, i), - csc->matrix[i]); - } - - for (i = 0; i < ARRAY_SIZE(csc->post_bias) ; i++) { - mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_BV(pipe, i), - csc->pre_bias[i]); - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_BV(pipe, i), - csc->post_bias[i]); - } - - for (i = 0; i < ARRAY_SIZE(csc->post_clamp) ; i++) { - mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_LV(pipe, i), - csc->pre_clamp[i]); - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_LV(pipe, i), - csc->post_clamp[i]); - } -} - -#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000 - -static int mdp4_plane_mode_set(struct drm_plane *plane, - struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) -{ - struct drm_device *dev = plane->dev; - struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); - struct mdp4_kms *mdp4_kms = get_kms(plane); - enum mdp4_pipe pipe = mdp4_plane->pipe; - const 
struct mdp_format *format; - uint32_t op_mode = 0; - uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; - uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; - enum mdp4_frame_format frame_type; - - if (!(crtc && fb)) { - DBG("%s: disabled!", mdp4_plane->name); - return 0; - } - - frame_type = mdp4_get_frame_format(fb); - - /* src values are in Q16 fixed point, convert to integer: */ - src_x = src_x >> 16; - src_y = src_y >> 16; - src_w = src_w >> 16; - src_h = src_h >> 16; - - DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name, - fb->base.id, src_x, src_y, src_w, src_h, - crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); - - format = to_mdp_format(msm_framebuffer_format(fb)); - - if (src_w > (crtc_w * DOWN_SCALE_MAX)) { - dev_err(dev->dev, "Width down scaling exceeds limits!\n"); - return -ERANGE; - } - - if (src_h > (crtc_h * DOWN_SCALE_MAX)) { - dev_err(dev->dev, "Height down scaling exceeds limits!\n"); - return -ERANGE; - } - - if (crtc_w > (src_w * UP_SCALE_MAX)) { - dev_err(dev->dev, "Width up scaling exceeds limits!\n"); - return -ERANGE; - } - - if (crtc_h > (src_h * UP_SCALE_MAX)) { - dev_err(dev->dev, "Height up scaling exceeds limits!\n"); - return -ERANGE; - } - - if (src_w != crtc_w) { - uint32_t sel_unit = SCALE_FIR; - op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN; - - if (MDP_FORMAT_IS_YUV(format)) { - if (crtc_w > src_w) - sel_unit = SCALE_PIXEL_RPT; - else if (crtc_w <= (src_w / 4)) - sel_unit = SCALE_MN_PHASE; - - op_mode |= MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(sel_unit); - phasex_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT, - src_w, crtc_w); - } - } - - if (src_h != crtc_h) { - uint32_t sel_unit = SCALE_FIR; - op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN; - - if (MDP_FORMAT_IS_YUV(format)) { - - if (crtc_h > src_h) - sel_unit = SCALE_PIXEL_RPT; - else if (crtc_h <= (src_h / 4)) - sel_unit = SCALE_MN_PHASE; - - op_mode |= MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(sel_unit); - phasey_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT, - src_h, crtc_h); - } - } - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe), - MDP4_PIPE_SRC_SIZE_WIDTH(src_w) | - MDP4_PIPE_SRC_SIZE_HEIGHT(src_h)); - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe), - MDP4_PIPE_SRC_XY_X(src_x) | - MDP4_PIPE_SRC_XY_Y(src_y)); - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe), - MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) | - MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), - MDP4_PIPE_DST_XY_X(crtc_x) | - MDP4_PIPE_DST_XY_Y(crtc_y)); - - mdp4_plane_set_scanout(plane, fb); - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), - MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | - MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | - MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | - MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | - COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) | - MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | - MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | - MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) | - MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) | - MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) | - COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT)); - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe), - MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | - MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | - MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | - MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); - - if (MDP_FORMAT_IS_YUV(format)) { - struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB); - - op_mode |= 
MDP4_PIPE_OP_MODE_SRC_YCBCR; - op_mode |= MDP4_PIPE_OP_MODE_CSC_EN; - mdp4_write_csc_config(mdp4_kms, pipe, csc); - } - - mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); - - if (frame_type != FRAME_LINEAR) - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SSTILE_FRAME_SIZE(pipe), - MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(src_w) | - MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(src_h)); - - return 0; -} - -static const char *pipe_names[] = { - "VG1", "VG2", - "RGB1", "RGB2", "RGB3", - "VG3", "VG4", -}; - -enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane) -{ - struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); - return mdp4_plane->pipe; -} - -/* initialize plane */ -struct drm_plane *mdp4_plane_init(struct drm_device *dev, - enum mdp4_pipe pipe_id, bool private_plane) -{ - struct drm_plane *plane = NULL; - struct mdp4_plane *mdp4_plane; - int ret; - enum drm_plane_type type; - - mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL); - if (!mdp4_plane) { - ret = -ENOMEM; - goto fail; - } - - plane = &mdp4_plane->base; - - mdp4_plane->pipe = pipe_id; - mdp4_plane->name = pipe_names[pipe_id]; - mdp4_plane->caps = mdp4_pipe_caps(pipe_id); - - mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats, - ARRAY_SIZE(mdp4_plane->formats), - !pipe_supports_yuv(mdp4_plane->caps)); - - type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; - ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs, - mdp4_plane->formats, mdp4_plane->nformats, - NULL, type, NULL); - if (ret) - goto fail; - - drm_plane_helper_add(plane, &mdp4_plane_helper_funcs); - - mdp4_plane_install_properties(plane, &plane->base); - - return plane; - -fail: - if (plane) - mdp4_plane_destroy(plane); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h deleted file mode 100644 index d9c10e02ee41..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ /dev/null @@ -1,1968 +0,0 @@ -#ifndef MDP5_XML -#define MDP5_XML - -/* Autogenerated file, DO NOT EDIT manually! 
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27) - -Copyright (C) 2013-2017 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - - -enum mdp5_intf_type { - INTF_DISABLED = 0, - INTF_DSI = 1, - INTF_HDMI = 3, - INTF_LCDC = 5, - INTF_eDP = 9, - INTF_VIRTUAL = 100, - INTF_WB = 101, -}; - -enum mdp5_intfnum { - NO_INTF = 0, - INTF0 = 1, - INTF1 = 2, - INTF2 = 3, - INTF3 = 4, -}; - -enum mdp5_pipe { - SSPP_NONE = 0, - SSPP_VIG0 = 1, - SSPP_VIG1 = 2, - SSPP_VIG2 = 3, - SSPP_RGB0 = 4, - SSPP_RGB1 = 5, - SSPP_RGB2 = 6, - SSPP_DMA0 = 7, - SSPP_DMA1 = 8, - SSPP_VIG3 = 9, - SSPP_RGB3 = 10, - SSPP_CURSOR0 = 11, - SSPP_CURSOR1 = 12, -}; - -enum mdp5_ctl_mode { - MODE_NONE = 0, - MODE_WB_0_BLOCK = 1, - MODE_WB_1_BLOCK = 2, - MODE_WB_0_LINE = 3, - MODE_WB_1_LINE = 4, - MODE_WB_2_LINE = 5, -}; - -enum mdp5_pack_3d { - PACK_3D_FRAME_INT = 0, - PACK_3D_H_ROW_INT = 1, - PACK_3D_V_ROW_INT = 2, - PACK_3D_COL_INT = 3, -}; - -enum mdp5_scale_filter { - SCALE_FILTER_NEAREST = 0, - SCALE_FILTER_BIL = 1, - SCALE_FILTER_PCMN = 2, - SCALE_FILTER_CA = 3, -}; - -enum mdp5_pipe_bwc { - BWC_LOSSLESS = 0, - BWC_Q_HIGH = 1, - BWC_Q_MED = 2, -}; - -enum mdp5_cursor_format { - CURSOR_FMT_ARGB8888 = 0, - CURSOR_FMT_ARGB1555 = 2, - CURSOR_FMT_ARGB4444 = 4, -}; - -enum mdp5_cursor_alpha { - CURSOR_ALPHA_CONST = 0, - CURSOR_ALPHA_PER_PIXEL = 2, -}; - -enum mdp5_igc_type { - IGC_VIG = 0, - IGC_RGB = 1, - IGC_DMA = 2, - IGC_DSPP = 3, -}; - -enum mdp5_data_format { - DATA_FORMAT_RGB = 0, - DATA_FORMAT_YUV = 1, -}; - -enum mdp5_block_size { - BLOCK_SIZE_64 = 0, - BLOCK_SIZE_128 = 1, -}; - -enum mdp5_rotate_mode { - ROTATE_0 = 0, - ROTATE_90 = 1, -}; - -enum mdp5_chroma_downsample_method { - DS_MTHD_NO_PIXEL_DROP = 0, - DS_MTHD_PIXEL_DROP = 1, -}; - -#define MDP5_IRQ_WB_0_DONE 0x00000001 -#define MDP5_IRQ_WB_1_DONE 0x00000002 -#define MDP5_IRQ_WB_2_DONE 0x00000010 -#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100 -#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200 -#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400 -#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800 -#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000 -#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000 -#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000 -#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000 -#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000 -#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000 -#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000 -#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000 -#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000 -#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000 -#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000 -#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000 -#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000 -#define MDP5_IRQ_INTF0_VSYNC 0x02000000 -#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000 -#define MDP5_IRQ_INTF1_VSYNC 0x08000000 -#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000 -#define MDP5_IRQ_INTF2_VSYNC 0x20000000 -#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000 -#define MDP5_IRQ_INTF3_VSYNC 0x80000000 -#define REG_MDSS_HW_VERSION 0x00000000 -#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff -#define MDSS_HW_VERSION_STEP__SHIFT 0 -static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val) -{ - return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK; -} -#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000 -#define MDSS_HW_VERSION_MINOR__SHIFT 16 -static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val) -{ - return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK; -} -#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000 -#define MDSS_HW_VERSION_MAJOR__SHIFT 28 -static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val) -{ - return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & 
MDSS_HW_VERSION_MAJOR__MASK; -} - -#define REG_MDSS_HW_INTR_STATUS 0x00000010 -#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001 -#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010 -#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020 -#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100 -#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000 - -#define REG_MDP5_HW_VERSION 0x00000000 -#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff -#define MDP5_HW_VERSION_STEP__SHIFT 0 -static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val) -{ - return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK; -} -#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000 -#define MDP5_HW_VERSION_MINOR__SHIFT 16 -static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val) -{ - return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK; -} -#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000 -#define MDP5_HW_VERSION_MAJOR__SHIFT 28 -static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val) -{ - return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK; -} - -#define REG_MDP5_DISP_INTF_SEL 0x00000004 -#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff -#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val) -{ - return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK; -} -#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 -#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val) -{ - return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK; -} -#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 -#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val) -{ - return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK; -} -#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000 -#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val) -{ - return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK; -} - -#define REG_MDP5_INTR_EN 0x00000010 - -#define REG_MDP5_INTR_STATUS 0x00000014 - -#define REG_MDP5_INTR_CLEAR 0x00000018 - -#define REG_MDP5_HIST_INTR_EN 0x0000001c - -#define REG_MDP5_HIST_INTR_STATUS 0x00000020 - -#define REG_MDP5_HIST_INTR_CLEAR 0x00000024 - -#define REG_MDP5_SPARE_0 0x00000028 -#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001 - -static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; } - -static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; } -#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff -#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 -static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; -} -#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 -#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 -static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; -} -#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 -#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 -static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; -} - -static inline uint32_t 
REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; } - -static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; } -#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff -#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 -static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK; -} -#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 -#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 -static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK; -} -#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 -#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 -static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK; -} - -static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) -{ - switch (idx) { - case IGC_VIG: return 0x00000200; - case IGC_RGB: return 0x00000210; - case IGC_DMA: return 0x00000220; - case IGC_DSPP: return 0x00000300; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); } - -static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } -#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff -#define MDP5_IGC_LUT_REG_VAL__SHIFT 0 -static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val) -{ - return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK; -} -#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000 -#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 -#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 -#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 - -#define REG_MDP5_SPLIT_DPL_EN 0x000002f4 - -#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8 -#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 -#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 -#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 -#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 - -#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0 -#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 -#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 -#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 -#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 - -static inline uint32_t __offset_CTL(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->ctl.base[0]); - case 1: return (mdp5_cfg->ctl.base[1]); - case 2: return (mdp5_cfg->ctl.base[2]); - case 3: return (mdp5_cfg->ctl.base[3]); - case 4: return (mdp5_cfg->ctl.base[4]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); } - -static inline uint32_t __offset_LAYER(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00000000; - case 1: return 0x00000004; - case 2: return 0x00000008; - case 3: return 0x0000000c; - case 4: return 0x00000010; - case 5: return 0x00000024; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } - -static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { 
return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } -#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007 -#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK; -} -#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038 -#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK; -} -#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0 -#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK; -} -#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00 -#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK; -} -#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000 -#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK; -} -#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000 -#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK; -} -#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000 -#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18 -static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK; -} -#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000 -#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21 -static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK; -} -#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000 -#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000 -#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000 -#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK; -} -#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000 -#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK; -} - -static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); } -#define MDP5_CTL_OP_MODE__MASK 0x0000000f -#define MDP5_CTL_OP_MODE__SHIFT 0 -static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val) -{ - return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK; -} -#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070 -#define MDP5_CTL_OP_INTF_NUM__SHIFT 4 -static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val) -{ - return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK; -} -#define MDP5_CTL_OP_CMD_MODE 0x00020000 -#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000 -#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000 -#define MDP5_CTL_OP_PACK_3D__SHIFT 20 -static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val) -{ - return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK; -} - -static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 
0x00000018 + __offset_CTL(i0); } -#define MDP5_CTL_FLUSH_VIG0 0x00000001 -#define MDP5_CTL_FLUSH_VIG1 0x00000002 -#define MDP5_CTL_FLUSH_VIG2 0x00000004 -#define MDP5_CTL_FLUSH_RGB0 0x00000008 -#define MDP5_CTL_FLUSH_RGB1 0x00000010 -#define MDP5_CTL_FLUSH_RGB2 0x00000020 -#define MDP5_CTL_FLUSH_LM0 0x00000040 -#define MDP5_CTL_FLUSH_LM1 0x00000080 -#define MDP5_CTL_FLUSH_LM2 0x00000100 -#define MDP5_CTL_FLUSH_LM3 0x00000200 -#define MDP5_CTL_FLUSH_LM4 0x00000400 -#define MDP5_CTL_FLUSH_DMA0 0x00000800 -#define MDP5_CTL_FLUSH_DMA1 0x00001000 -#define MDP5_CTL_FLUSH_DSPP0 0x00002000 -#define MDP5_CTL_FLUSH_DSPP1 0x00004000 -#define MDP5_CTL_FLUSH_DSPP2 0x00008000 -#define MDP5_CTL_FLUSH_WB 0x00010000 -#define MDP5_CTL_FLUSH_CTL 0x00020000 -#define MDP5_CTL_FLUSH_VIG3 0x00040000 -#define MDP5_CTL_FLUSH_RGB3 0x00080000 -#define MDP5_CTL_FLUSH_LM5 0x00100000 -#define MDP5_CTL_FLUSH_DSPP3 0x00200000 -#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000 -#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000 -#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000 -#define MDP5_CTL_FLUSH_TIMING_3 0x10000000 -#define MDP5_CTL_FLUSH_TIMING_2 0x20000000 -#define MDP5_CTL_FLUSH_TIMING_1 0x40000000 -#define MDP5_CTL_FLUSH_TIMING_0 0x80000000 - -static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); } - -static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); } - -static inline uint32_t __offset_LAYER_EXT(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00000040; - case 1: return 0x00000044; - case 2: return 0x00000048; - case 3: return 0x0000004c; - case 4: return 0x00000050; - case 5: return 0x00000054; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); } - -static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); } -#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001 -#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004 -#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010 -#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040 -#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100 -#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400 -#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000 -#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000 -#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000 -#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000 -#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000 -#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20 -static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK; -} -#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000 -#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26 -static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK; -} - -static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) -{ - switch (idx) { - case SSPP_NONE: return (INVALID_IDX(idx)); - case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]); - case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]); - case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]); - case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]); - case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]); - case SSPP_RGB2: return 
(mdp5_cfg->pipe_rgb.base[2]); - case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]); - case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]); - case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]); - case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]); - case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]); - case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_OP_MODE(enum mdp5_pipe i0) { return 0x00000200 + __offset_PIPE(i0); } -#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00080000 -#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 19 -static inline uint32_t MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(enum mdp5_data_format val) -{ - return ((val) << MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK; -} -#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00040000 -#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 18 -static inline uint32_t MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(enum mdp5_data_format val) -{ - return ((val) << MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK; -} -#define MDP5_PIPE_OP_MODE_CSC_1_EN 0x00020000 - -static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(enum mdp5_pipe i0) { return 0x00000320 + __offset_PIPE(i0); } -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK; -} -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000 -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT 16 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(enum mdp5_pipe i0) { return 0x00000324 + __offset_PIPE(i0); } -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK; -} -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000 -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT 16 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(enum mdp5_pipe i0) { return 0x00000328 + __offset_PIPE(i0); } -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(uint32_t val) -{ - 
return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK; -} -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000 -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT 16 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(enum mdp5_pipe i0) { return 0x0000032c + __offset_PIPE(i0); } -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK; -} -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000 -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT 16 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(enum mdp5_pipe i0) { return 0x00000330 + __offset_PIPE(i0); } -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; } -#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK 0x000000ff -#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK; -} -#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK 0x0000ff00 -#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT 8 -static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; } -#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK 0x000000ff -#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK; -} -#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK 0x0000ff00 -#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT 8 -static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) 
+ 0x4*i1; } - -static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; } -#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK 0x000001ff -#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; } -#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK 0x000001ff -#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK; -} -#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff -#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK; -} -#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff -#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000 -#define MDP5_PIPE_SRC_XY_Y__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK; -} -#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff -#define MDP5_PIPE_SRC_XY_X__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); } -#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK; -} -#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff -#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 
0x00000010 + __offset_PIPE(i0); } -#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000 -#define MDP5_PIPE_OUT_XY_Y__SHIFT 16 -static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val) -{ - return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK; -} -#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff -#define MDP5_PIPE_OUT_XY_X__SHIFT 0 -static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val) -{ - return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff -#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK; -} -#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000 -#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff -#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK; -} -#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000 -#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 -#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c -#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 -#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 -#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100 -#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 
0x00000600 -#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800 -#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000 -#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 -#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 -#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000 -#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000 -#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff -#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK; -} -#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00 -#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8 -static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK; -} -#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000 -#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK; -} -#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000 -#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24 -static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001 -#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006 -#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1 -static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val) -{ - return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK; -} -#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000 -#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000 -#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000 -#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000 -#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000 -#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000 -#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000 -#define MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE 0x80000000 - -static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); } - -static inline uint32_t 
REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); } -#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff -#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0 -static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val) -{ - return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK; -} -#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00 -#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8 -static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val) -{ - return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK; -} - -static inline uint32_t __offset_SW_PIX_EXT(enum mdp_component_type idx) -{ - switch (idx) { - case COMP_0: return 0x00000100; - case COMP_1_2: return 0x00000110; - case COMP_3: return 0x00000120; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } - -static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_LR(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } -#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK 0x000000ff -#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT 0 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK 0x0000ff00 -#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT 8 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(int32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK 0x00ff0000 -#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT 16 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK 0xff000000 -#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT 24 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(int32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_TB(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000004 + __offset_PIPE(i0) + 
__offset_SW_PIX_EXT(i1); } -#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK 0x000000ff -#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT 0 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK 0x0000ff00 -#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT 8 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(int32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK 0x00ff0000 -#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT 16 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK 0xff000000 -#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT 24 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(int32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000008 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } -#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK 0x0000ffff -#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT 0 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK 0xffff0000 -#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT 16 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); } -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK; -} -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK; -} -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK; -} -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14 -static inline uint32_t 
MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK; -} -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK; -} -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000218 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x0000021c + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); } - -static inline uint32_t __offset_LM(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->lm.base[0]); - case 1: return (mdp5_cfg->lm.base[1]); - case 2: return (mdp5_cfg->lm.base[2]); - case 3: return (mdp5_cfg->lm.base[3]); - case 4: return (mdp5_cfg->lm.base[4]); - case 5: return (mdp5_cfg->lm.base[5]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); } -#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080 -#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000 - -static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); } -#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK; -} -#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff -#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); } - -static inline uint32_t __offset_BLEND(uint32_t idx) -{ 
- switch (idx) { - case 0: return 0x00000020; - case 1: return 0x00000050; - case 2: return 0x00000080; - case 3: return 0x000000b0; - case 4: return 0x00000230; - case 5: return 0x00000260; - case 6: return 0x00000290; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); } -#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003 -#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0 -static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val) -{ - return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK; -} -#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004 -#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008 -#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010 -#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020 -#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300 -#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8 -static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val) -{ - return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK; -} -#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400 -#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800 -#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000 -#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000 - -static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); } -#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff -#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_W(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK; -} -#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK 0xffff0000 -#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT 16 -static inline uint32_t 
MDP5_LM_CURSOR_IMG_SIZE_SRC_H(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); } -#define MDP5_LM_CURSOR_SIZE_ROI_W__MASK 0x0000ffff -#define MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_W(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_W__MASK; -} -#define MDP5_LM_CURSOR_SIZE_ROI_H__MASK 0xffff0000 -#define MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT 16 -static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_H(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_H__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); } -#define MDP5_LM_CURSOR_XY_SRC_X__MASK 0x0000ffff -#define MDP5_LM_CURSOR_XY_SRC_X__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_XY_SRC_X(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_XY_SRC_X__SHIFT) & MDP5_LM_CURSOR_XY_SRC_X__MASK; -} -#define MDP5_LM_CURSOR_XY_SRC_Y__MASK 0xffff0000 -#define MDP5_LM_CURSOR_XY_SRC_Y__SHIFT 16 -static inline uint32_t MDP5_LM_CURSOR_XY_SRC_Y(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_XY_SRC_Y__SHIFT) & MDP5_LM_CURSOR_XY_SRC_Y__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); } -#define MDP5_LM_CURSOR_STRIDE_STRIDE__MASK 0x0000ffff -#define MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_STRIDE_STRIDE(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT) & MDP5_LM_CURSOR_STRIDE_STRIDE__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); } -#define MDP5_LM_CURSOR_FORMAT_FORMAT__MASK 0x00000007 -#define MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_FORMAT_FORMAT(enum mdp5_cursor_format val) -{ - return ((val) << MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT) & MDP5_LM_CURSOR_FORMAT_FORMAT__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); } -#define MDP5_LM_CURSOR_START_XY_X_START__MASK 0x0000ffff -#define MDP5_LM_CURSOR_START_XY_X_START__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_START_XY_X_START(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_START_XY_X_START__SHIFT) & MDP5_LM_CURSOR_START_XY_X_START__MASK; -} -#define MDP5_LM_CURSOR_START_XY_Y_START__MASK 0xffff0000 -#define MDP5_LM_CURSOR_START_XY_Y_START__SHIFT 16 -static inline uint32_t MDP5_LM_CURSOR_START_XY_Y_START(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_START_XY_Y_START__SHIFT) & MDP5_LM_CURSOR_START_XY_Y_START__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); } -#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN 0x00000001 -#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK 0x00000006 -#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT 1 -static inline uint32_t MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(enum mdp5_cursor_alpha val) -{ - return ((val) << MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT) & MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK; -} -#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN 0x00000008 - -static inline uint32_t 
REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); } - -static inline uint32_t __offset_DSPP(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->dspp.base[0]); - case 1: return (mdp5_cfg->dspp.base[1]); - case 2: return (mdp5_cfg->dspp.base[2]); - case 3: return (mdp5_cfg->dspp.base[3]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); } -#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001 -#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e -#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1 -static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val) -{ - return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK; -} -#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010 -#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100 -#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000 -#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000 -#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000 -#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000 -#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000 -#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000 - -static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); } - -static inline uint32_t __offset_PP(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->pp.base[0]); - case 1: return (mdp5_cfg->pp.base[1]); - case 2: return (mdp5_cfg->pp.base[2]); - case 3: return (mdp5_cfg->pp.base[3]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); } -#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff -#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0 -static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val) -{ - return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & 
MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK; -} -#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000 -#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000 - -static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); } -#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff -#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0 -static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val) -{ - return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK; -} -#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000 -#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16 -static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val) -{ - return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK; -} - -static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); } -#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff -#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0 -static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val) -{ - return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK; -} -#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000 -#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16 -static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val) -{ - return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK; -} - -static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); } -#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff -#define MDP5_PP_SYNC_THRESH_START__SHIFT 0 -static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val) -{ - return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK; -} -#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000 -#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16 -static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val) -{ - return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK; -} - -static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); } - -static inline uint32_t __offset_WB(uint32_t idx) -{ - switch (idx) { -#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */ - case 0: return (mdp5_cfg->wb.base[0]); - case 1: return (mdp5_cfg->wb.base[1]); - case 2: return 
(mdp5_cfg->wb.base[2]); - case 3: return (mdp5_cfg->wb.base[3]); - case 4: return (mdp5_cfg->wb.base[4]); -#endif - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_WB(uint32_t i0) { return 0x00000000 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST_FORMAT(uint32_t i0) { return 0x00000000 + __offset_WB(i0); } -#define MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK 0x00000003 -#define MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT 0 -static inline uint32_t MDP5_WB_DST_FORMAT_DSTC0_OUT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK; -} -#define MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK 0x0000000c -#define MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT 2 -static inline uint32_t MDP5_WB_DST_FORMAT_DSTC1_OUT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK; -} -#define MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK 0x00000030 -#define MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT 4 -static inline uint32_t MDP5_WB_DST_FORMAT_DSTC2_OUT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK; -} -#define MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK 0x000000c0 -#define MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT 6 -static inline uint32_t MDP5_WB_DST_FORMAT_DSTC3_OUT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK; -} -#define MDP5_WB_DST_FORMAT_DSTC3_EN 0x00000100 -#define MDP5_WB_DST_FORMAT_DST_BPP__MASK 0x00000600 -#define MDP5_WB_DST_FORMAT_DST_BPP__SHIFT 9 -static inline uint32_t MDP5_WB_DST_FORMAT_DST_BPP(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DST_BPP__SHIFT) & MDP5_WB_DST_FORMAT_DST_BPP__MASK; -} -#define MDP5_WB_DST_FORMAT_PACK_COUNT__MASK 0x00003000 -#define MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT 12 -static inline uint32_t MDP5_WB_DST_FORMAT_PACK_COUNT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT) & MDP5_WB_DST_FORMAT_PACK_COUNT__MASK; -} -#define MDP5_WB_DST_FORMAT_DST_ALPHA_X 0x00004000 -#define MDP5_WB_DST_FORMAT_PACK_TIGHT 0x00020000 -#define MDP5_WB_DST_FORMAT_PACK_ALIGN_MSB 0x00040000 -#define MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK 0x00180000 -#define MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT 19 -static inline uint32_t MDP5_WB_DST_FORMAT_WRITE_PLANES(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT) & MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK; -} -#define MDP5_WB_DST_FORMAT_DST_DITHER_EN 0x00400000 -#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK 0x03800000 -#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT 23 -static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK; -} -#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK 0x3c000000 -#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT 26 -static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SITE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK; -} -#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK 0xc0000000 -#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT 30 -static inline uint32_t MDP5_WB_DST_FORMAT_FRAME_FORMAT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT) & MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK; -} - -static inline uint32_t REG_MDP5_WB_DST_OP_MODE(uint32_t i0) { return 0x00000004 + __offset_WB(i0); } -#define MDP5_WB_DST_OP_MODE_BWC_ENC_EN 0x00000001 -#define 
MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK 0x00000006 -#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT 1 -static inline uint32_t MDP5_WB_DST_OP_MODE_BWC_ENC_OP(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT) & MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK; -} -#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK 0x00000010 -#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT 4 -static inline uint32_t MDP5_WB_DST_OP_MODE_BLOCK_SIZE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT) & MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK; -} -#define MDP5_WB_DST_OP_MODE_ROT_MODE__MASK 0x00000020 -#define MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT 5 -static inline uint32_t MDP5_WB_DST_OP_MODE_ROT_MODE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT) & MDP5_WB_DST_OP_MODE_ROT_MODE__MASK; -} -#define MDP5_WB_DST_OP_MODE_ROT_EN 0x00000040 -#define MDP5_WB_DST_OP_MODE_CSC_EN 0x00000100 -#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00000200 -#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 9 -static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK; -} -#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00000400 -#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 10 -static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK; -} -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_EN 0x00000800 -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK 0x00001000 -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT 12 -static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK; -} -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK 0x00002000 -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT 13 -static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK; -} -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK 0x00004000 -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT 14 -static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK; -} - -static inline uint32_t REG_MDP5_WB_DST_PACK_PATTERN(uint32_t i0) { return 0x00000008 + __offset_WB(i0); } -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK 0x00000003 -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT 0 -static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT0(uint32_t val) -{ - return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK; -} -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK 0x00000300 -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT 8 -static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT1(uint32_t val) -{ - return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK; -} -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK 0x00030000 -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT 16 -static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT2(uint32_t val) 
-{ - return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK; -} -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK 0x03000000 -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT 24 -static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT3(uint32_t val) -{ - return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK; -} - -static inline uint32_t REG_MDP5_WB_DST0_ADDR(uint32_t i0) { return 0x0000000c + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST1_ADDR(uint32_t i0) { return 0x00000010 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST2_ADDR(uint32_t i0) { return 0x00000014 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST3_ADDR(uint32_t i0) { return 0x00000018 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST_YSTRIDE0(uint32_t i0) { return 0x0000001c + __offset_WB(i0); } -#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK 0x0000ffff -#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT 0 -static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK; -} -#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK 0xffff0000 -#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT 16 -static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK; -} - -static inline uint32_t REG_MDP5_WB_DST_YSTRIDE1(uint32_t i0) { return 0x00000020 + __offset_WB(i0); } -#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK 0x0000ffff -#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT 0 -static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK; -} -#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK 0xffff0000 -#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT 16 -static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK; -} - -static inline uint32_t REG_MDP5_WB_DST_DITHER_BITDEPTH(uint32_t i0) { return 0x00000024 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW0(uint32_t i0) { return 0x00000030 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW1(uint32_t i0) { return 0x00000034 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW2(uint32_t i0) { return 0x00000038 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW3(uint32_t i0) { return 0x0000003c + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST_WRITE_CONFIG(uint32_t i0) { return 0x00000048 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_ROTATION_DNSCALER(uint32_t i0) { return 0x00000050 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_0_3(uint32_t i0) { return 0x00000060 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_1_2(uint32_t i0) { return 0x00000064 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_0_3(uint32_t i0) { return 0x00000068 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_1_2(uint32_t i0) { return 0x0000006c + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_OUT_SIZE(uint32_t i0) { return 0x00000074 + __offset_WB(i0); } -#define 
MDP5_WB_OUT_SIZE_DST_W__MASK 0x0000ffff -#define MDP5_WB_OUT_SIZE_DST_W__SHIFT 0 -static inline uint32_t MDP5_WB_OUT_SIZE_DST_W(uint32_t val) -{ - return ((val) << MDP5_WB_OUT_SIZE_DST_W__SHIFT) & MDP5_WB_OUT_SIZE_DST_W__MASK; -} -#define MDP5_WB_OUT_SIZE_DST_H__MASK 0xffff0000 -#define MDP5_WB_OUT_SIZE_DST_H__SHIFT 16 -static inline uint32_t MDP5_WB_OUT_SIZE_DST_H(uint32_t val) -{ - return ((val) << MDP5_WB_OUT_SIZE_DST_H__SHIFT) & MDP5_WB_OUT_SIZE_DST_H__MASK; -} - -static inline uint32_t REG_MDP5_WB_ALPHA_X_VALUE(uint32_t i0) { return 0x00000078 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_0(uint32_t i0) { return 0x00000260 + __offset_WB(i0); } -#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff -#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK; -} -#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000 -#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT 16 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_1(uint32_t i0) { return 0x00000264 + __offset_WB(i0); } -#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff -#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK; -} -#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000 -#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT 16 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_2(uint32_t i0) { return 0x00000268 + __offset_WB(i0); } -#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff -#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK; -} -#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000 -#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT 16 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_3(uint32_t i0) { return 0x0000026c + __offset_WB(i0); } -#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff -#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK; -} -#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000 -#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT 16 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_4(uint32_t i0) { return 0x00000270 + 
__offset_WB(i0); } -#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff -#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; } -#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK 0x000000ff -#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK; -} -#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK 0x0000ff00 -#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT 8 -static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; } -#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK 0x000000ff -#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK; -} -#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK 0x0000ff00 -#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT 8 -static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS_REG(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; } -#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK 0x000001ff -#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS_REG(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; } -#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK 0x000001ff -#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK; -} - -static inline uint32_t __offset_INTF(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->intf.base[0]); - case 1: return (mdp5_cfg->intf.base[1]); - case 2: return (mdp5_cfg->intf.base[2]); - case 3: return (mdp5_cfg->intf.base[3]); - case 4: return (mdp5_cfg->intf.base[4]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t 
REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); } -#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff -#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0 -static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val) -{ - return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK; -} -#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000 -#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16 -static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val) -{ - return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK; -} - -static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); } -#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff -#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0 -static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val) -{ - return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK; -} -#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000 - -static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); } -#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff -#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0 -static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val) -{ - return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK; -} - -static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); } -#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff -#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0 -static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val) -{ - return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK; -} -#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000 -#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16 -static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val) -{ - return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK; -} - -static inline uint32_t 
REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); } -#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff -#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0 -static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val) -{ - return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK; -} -#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000 -#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16 -static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val) -{ - return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK; -} -#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000 - -static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); } -#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001 -#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002 -#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004 - -static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); } - -static inline uint32_t __offset_AD(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->ad.base[0]); - case 1: return (mdp5_cfg->ad.base[1]); - 
default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); } - - -#endif /* MDP5_XML */ 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c deleted file mode 100644 index 824067d2d427..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ /dev/null @@ -1,652 +0,0 @@ -/* - * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "mdp5_kms.h" -#include "mdp5_cfg.h" - -struct mdp5_cfg_handler { - int revision; - struct mdp5_cfg config; -}; - -/* mdp5_cfg must be exposed (used in mdp5.xml.h) */ -const struct mdp5_cfg_hw *mdp5_cfg = NULL; - -const struct mdp5_cfg_hw msm8x74v1_config = { - .name = "msm8x74v1", - .mdp = { - .count = 1, - .caps = MDP_CAP_SMP | - 0, - }, - .smp = { - .mmb_count = 22, - .mmb_size = 4096, - .clients = { - [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7, - [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, - [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18, - }, - }, - .ctl = { - .count = 5, - .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, - .flush_hw_mask = 0x0003ffff, - }, - .pipe_vig = { - .count = 3, - .base = { 0x01100, 0x01500, 0x01900 }, - .caps = MDP_PIPE_CAP_HFLIP | - MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | - MDP_PIPE_CAP_CSC | - 0, - }, - .pipe_rgb = { - .count = 3, - .base = { 0x01d00, 0x02100, 0x02500 }, - .caps = MDP_PIPE_CAP_HFLIP | - MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | - 0, - }, - .pipe_dma = { - .count = 2, - .base = { 0x02900, 0x02d00 }, - .caps = MDP_PIPE_CAP_HFLIP | - MDP_PIPE_CAP_VFLIP | - 0, - }, - .lm = { - .count = 5, - .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, - .instances = { - { .id = 0, .pp = 0, .dspp = 0, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 1, .pp = 1, .dspp = 1, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 2, .pp = 2, .dspp = 2, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 3, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB }, - { .id = 4, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB }, - }, - .nb_stages = 5, - .max_width = 2048, - .max_height = 0xFFFF, - }, - .dspp = { - .count = 3, - .base = { 0x04500, 0x04900, 0x04d00 }, - }, - .pp = { - .count = 3, - .base = { 0x21a00, 0x21b00, 0x21c00 }, - }, - .intf = { - .base = { 0x21000, 0x21200, 0x21400, 0x21600 }, - .connect = { - [0] = INTF_eDP, - [1] = INTF_DSI, - [2] = INTF_DSI, - [3] = INTF_HDMI, - }, - }, - .max_clk = 200000000, -}; - -const struct mdp5_cfg_hw msm8x74v2_config = { - .name = "msm8x74", - .mdp = { - .count = 1, - .caps = MDP_CAP_SMP | - 0, - }, - .smp = { - .mmb_count = 22, - .mmb_size = 4096, - .clients = { - [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7, - [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, - [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18, - }, - }, - .ctl = { - .count = 5, - .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, - .flush_hw_mask = 0x0003ffff, - }, - .pipe_vig = { - .count = 3, - .base = { 0x01100, 0x01500, 0x01900 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | - MDP_PIPE_CAP_DECIMATION, - }, - .pipe_rgb = { - .count = 3, - .base = { 0x01d00, 0x02100, 0x02500 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - 
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, - }, - .pipe_dma = { - .count = 2, - .base = { 0x02900, 0x02d00 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, - }, - .lm = { - .count = 5, - .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, - .instances = { - { .id = 0, .pp = 0, .dspp = 0, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 1, .pp = 1, .dspp = 1, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 2, .pp = 2, .dspp = 2, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 3, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB, }, - { .id = 4, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB, }, - }, - .nb_stages = 5, - .max_width = 2048, - .max_height = 0xFFFF, - }, - .dspp = { - .count = 3, - .base = { 0x04500, 0x04900, 0x04d00 }, - }, - .ad = { - .count = 2, - .base = { 0x13000, 0x13200 }, - }, - .pp = { - .count = 3, - .base = { 0x12c00, 0x12d00, 0x12e00 }, - }, - .intf = { - .base = { 0x12400, 0x12600, 0x12800, 0x12a00 }, - .connect = { - [0] = INTF_eDP, - [1] = INTF_DSI, - [2] = INTF_DSI, - [3] = INTF_HDMI, - }, - }, - .max_clk = 200000000, -}; - -const struct mdp5_cfg_hw apq8084_config = { - .name = "apq8084", - .mdp = { - .count = 1, - .caps = MDP_CAP_SMP | - MDP_CAP_SRC_SPLIT | - 0, - }, - .smp = { - .mmb_count = 44, - .mmb_size = 8192, - .clients = { - [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, - [SSPP_VIG2] = 7, [SSPP_VIG3] = 19, - [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, - [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, - [SSPP_RGB2] = 18, [SSPP_RGB3] = 22, - }, - .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */ - .reserved = { - /* Two SMP blocks are statically tied to RGB pipes: */ - [16] = 2, [17] = 2, [18] = 2, [22] = 2, - }, - }, - .ctl = { - .count = 5, - .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, - .flush_hw_mask = 0x003fffff, - }, - .pipe_vig = { - .count = 4, - .base = { 0x01100, 0x01500, 0x01900, 0x01d00 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | - MDP_PIPE_CAP_DECIMATION, - }, - .pipe_rgb = { - .count = 4, - .base = { 0x02100, 0x02500, 0x02900, 0x02d00 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, - }, - .pipe_dma = { - .count = 2, - .base = { 0x03100, 0x03500 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, - }, - .lm = { - .count = 6, - .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 }, - .instances = { - { .id = 0, .pp = 0, .dspp = 0, - .caps = MDP_LM_CAP_DISPLAY | - MDP_LM_CAP_PAIR, }, - { .id = 1, .pp = 1, .dspp = 1, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 2, .pp = 2, .dspp = 2, - .caps = MDP_LM_CAP_DISPLAY | - MDP_LM_CAP_PAIR, }, - { .id = 3, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB, }, - { .id = 4, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB, }, - { .id = 5, .pp = 3, .dspp = 3, - .caps = MDP_LM_CAP_DISPLAY, }, - }, - .nb_stages = 5, - .max_width = 2048, - .max_height = 0xFFFF, - }, - .dspp = { - .count = 4, - .base = { 0x05100, 0x05500, 0x05900, 0x05d00 }, - - }, - .ad = { - .count = 3, - .base = { 0x13400, 0x13600, 0x13800 }, - }, - .pp = { - .count = 4, - .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 }, - }, - .intf = { - .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 }, - .connect = { - [0] = INTF_eDP, - [1] = INTF_DSI, - [2] = INTF_DSI, - [3] = INTF_HDMI, - }, - }, - .max_clk = 320000000, -}; - -const struct mdp5_cfg_hw msm8x16_config = { - .name = "msm8x16", - .mdp = { - .count = 1, - .base = { 0x0 }, - .caps = MDP_CAP_SMP | - 0, - }, - .smp = { - .mmb_count = 8, - .mmb_size = 8192, - .clients = { - [SSPP_VIG0] = 1, 
[SSPP_DMA0] = 4, - [SSPP_RGB0] = 7, [SSPP_RGB1] = 8, - }, - }, - .ctl = { - .count = 5, - .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, - .flush_hw_mask = 0x4003ffff, - }, - .pipe_vig = { - .count = 1, - .base = { 0x04000 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | - MDP_PIPE_CAP_DECIMATION, - }, - .pipe_rgb = { - .count = 2, - .base = { 0x14000, 0x16000 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_DECIMATION, - }, - .pipe_dma = { - .count = 1, - .base = { 0x24000 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, - }, - .lm = { - .count = 2, /* LM0 and LM3 */ - .base = { 0x44000, 0x47000 }, - .instances = { - { .id = 0, .pp = 0, .dspp = 0, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 3, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB }, - }, - .nb_stages = 8, - .max_width = 2048, - .max_height = 0xFFFF, - }, - .dspp = { - .count = 1, - .base = { 0x54000 }, - - }, - .intf = { - .base = { 0x00000, 0x6a800 }, - .connect = { - [0] = INTF_DISABLED, - [1] = INTF_DSI, - }, - }, - .max_clk = 320000000, -}; - -const struct mdp5_cfg_hw msm8x94_config = { - .name = "msm8x94", - .mdp = { - .count = 1, - .caps = MDP_CAP_SMP | - MDP_CAP_SRC_SPLIT | - 0, - }, - .smp = { - .mmb_count = 44, - .mmb_size = 8192, - .clients = { - [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, - [SSPP_VIG2] = 7, [SSPP_VIG3] = 19, - [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, - [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, - [SSPP_RGB2] = 18, [SSPP_RGB3] = 22, - }, - .reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */ - .reserved = { - [1] = 1, [4] = 1, [7] = 1, [19] = 1, - [16] = 5, [17] = 5, [18] = 5, [22] = 5, - }, - }, - .ctl = { - .count = 5, - .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, - .flush_hw_mask = 0xf0ffffff, - }, - .pipe_vig = { - .count = 4, - .base = { 0x04000, 0x06000, 0x08000, 0x0a000 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | - MDP_PIPE_CAP_DECIMATION, - }, - .pipe_rgb = { - .count = 4, - .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, - }, - .pipe_dma = { - .count = 2, - .base = { 0x24000, 0x26000 }, - .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, - }, - .lm = { - .count = 6, - .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, - .instances = { - { .id = 0, .pp = 0, .dspp = 0, - .caps = MDP_LM_CAP_DISPLAY | - MDP_LM_CAP_PAIR, }, - { .id = 1, .pp = 1, .dspp = 1, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 2, .pp = 2, .dspp = 2, - .caps = MDP_LM_CAP_DISPLAY | - MDP_LM_CAP_PAIR, }, - { .id = 3, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB, }, - { .id = 4, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB, }, - { .id = 5, .pp = 3, .dspp = 3, - .caps = MDP_LM_CAP_DISPLAY, }, - }, - .nb_stages = 8, - .max_width = 2048, - .max_height = 0xFFFF, - }, - .dspp = { - .count = 4, - .base = { 0x54000, 0x56000, 0x58000, 0x5a000 }, - - }, - .ad = { - .count = 3, - .base = { 0x78000, 0x78800, 0x79000 }, - }, - .pp = { - .count = 4, - .base = { 0x70000, 0x70800, 0x71000, 0x71800 }, - }, - .intf = { - .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 }, - .connect = { - [0] = INTF_DISABLED, - [1] = INTF_DSI, - [2] = INTF_DSI, - [3] = INTF_HDMI, - }, - }, - .max_clk = 400000000, -}; - -const struct mdp5_cfg_hw msm8x96_config = { - .name = "msm8x96", - .mdp = { - .count = 1, - .caps = MDP_CAP_DSC | - MDP_CAP_CDM | - MDP_CAP_SRC_SPLIT | - 0, - }, - .ctl = { - .count = 5, - .base = { 
0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, - .flush_hw_mask = 0xf4ffffff, - }, - .pipe_vig = { - .count = 4, - .base = { 0x04000, 0x06000, 0x08000, 0x0a000 }, - .caps = MDP_PIPE_CAP_HFLIP | - MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | - MDP_PIPE_CAP_CSC | - MDP_PIPE_CAP_DECIMATION | - MDP_PIPE_CAP_SW_PIX_EXT | - 0, - }, - .pipe_rgb = { - .count = 4, - .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, - .caps = MDP_PIPE_CAP_HFLIP | - MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | - MDP_PIPE_CAP_DECIMATION | - MDP_PIPE_CAP_SW_PIX_EXT | - 0, - }, - .pipe_dma = { - .count = 2, - .base = { 0x24000, 0x26000 }, - .caps = MDP_PIPE_CAP_HFLIP | - MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SW_PIX_EXT | - 0, - }, - .pipe_cursor = { - .count = 2, - .base = { 0x34000, 0x36000 }, - .caps = MDP_PIPE_CAP_HFLIP | - MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SW_PIX_EXT | - MDP_PIPE_CAP_CURSOR | - 0, - }, - - .lm = { - .count = 6, - .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, - .instances = { - { .id = 0, .pp = 0, .dspp = 0, - .caps = MDP_LM_CAP_DISPLAY | - MDP_LM_CAP_PAIR, }, - { .id = 1, .pp = 1, .dspp = 1, - .caps = MDP_LM_CAP_DISPLAY, }, - { .id = 2, .pp = 2, .dspp = -1, - .caps = MDP_LM_CAP_DISPLAY | - MDP_LM_CAP_PAIR, }, - { .id = 3, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB, }, - { .id = 4, .pp = -1, .dspp = -1, - .caps = MDP_LM_CAP_WB, }, - { .id = 5, .pp = 3, .dspp = -1, - .caps = MDP_LM_CAP_DISPLAY, }, - }, - .nb_stages = 8, - .max_width = 2560, - .max_height = 0xFFFF, - }, - .dspp = { - .count = 2, - .base = { 0x54000, 0x56000 }, - }, - .ad = { - .count = 3, - .base = { 0x78000, 0x78800, 0x79000 }, - }, - .pp = { - .count = 4, - .base = { 0x70000, 0x70800, 0x71000, 0x71800 }, - }, - .cdm = { - .count = 1, - .base = { 0x79200 }, - }, - .dsc = { - .count = 2, - .base = { 0x80000, 0x80400 }, - }, - .intf = { - .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 }, - .connect = { - [0] = INTF_DISABLED, - [1] = INTF_DSI, - [2] = INTF_DSI, - [3] = INTF_HDMI, - }, - }, - .max_clk = 412500000, -}; - -static const struct mdp5_cfg_handler cfg_handlers[] = { - { .revision = 0, .config = { .hw = &msm8x74v1_config } }, - { .revision = 2, .config = { .hw = &msm8x74v2_config } }, - { .revision = 3, .config = { .hw = &apq8084_config } }, - { .revision = 6, .config = { .hw = &msm8x16_config } }, - { .revision = 9, .config = { .hw = &msm8x94_config } }, - { .revision = 7, .config = { .hw = &msm8x96_config } }, -}; - -static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev); - -const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler) -{ - return cfg_handler->config.hw; -} - -struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler) -{ - return &cfg_handler->config; -} - -int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler) -{ - return cfg_handler->revision; -} - -void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler) -{ - kfree(cfg_handler); -} - -struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, - uint32_t major, uint32_t minor) -{ - struct drm_device *dev = mdp5_kms->dev; - struct platform_device *pdev = to_platform_device(dev->dev); - struct mdp5_cfg_handler *cfg_handler; - struct mdp5_cfg_platform *pconfig; - int i, ret = 0; - - cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL); - if (unlikely(!cfg_handler)) { - ret = -ENOMEM; - goto fail; - } - - if (major != 1) { - dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n", - major, minor); - ret = -ENXIO; - goto fail; - } - - /* only after 
mdp5_cfg global pointer's init can we access the hw */ - for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) { - if (cfg_handlers[i].revision != minor) - continue; - mdp5_cfg = cfg_handlers[i].config.hw; - - break; - } - if (unlikely(!mdp5_cfg)) { - dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n", - major, minor); - ret = -ENXIO; - goto fail; - } - - cfg_handler->revision = minor; - cfg_handler->config.hw = mdp5_cfg; - - pconfig = mdp5_get_config(pdev); - memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig)); - - DBG("MDP5: %s hw config selected", mdp5_cfg->name); - - return cfg_handler; - -fail: - if (cfg_handler) - mdp5_cfg_destroy(cfg_handler); - - return NULL; -} - -static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev) -{ - static struct mdp5_cfg_platform config = {}; - - config.iommu = iommu_domain_alloc(&platform_bus_type); - if (config.iommu) { - config.iommu->geometry.aperture_start = 0x1000; - config.iommu->geometry.aperture_end = 0xffffffff; - } - - return &config; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h deleted file mode 100644 index 75910d0f2f4c..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright (c) 2014 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __MDP5_CFG_H__ -#define __MDP5_CFG_H__ - -#include "msm_drv.h" - -/* - * mdp5_cfg - * - * This module configures the dynamic offsets used by mdp5.xml.h - * (initialized in mdp5_cfg.c) - */ -extern const struct mdp5_cfg_hw *mdp5_cfg; - -#define MAX_CTL 8 -#define MAX_BASES 8 -#define MAX_SMP_BLOCKS 44 -#define MAX_CLIENTS 32 - -typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS); - -#define MDP5_SUB_BLOCK_DEFINITION \ - unsigned int count; \ - uint32_t base[MAX_BASES] - -struct mdp5_sub_block { - MDP5_SUB_BLOCK_DEFINITION; -}; - -struct mdp5_lm_instance { - int id; - int pp; - int dspp; - uint32_t caps; -}; - -struct mdp5_lm_block { - MDP5_SUB_BLOCK_DEFINITION; - struct mdp5_lm_instance instances[MAX_BASES]; - uint32_t nb_stages; /* number of stages per blender */ - uint32_t max_width; /* Maximum output resolution */ - uint32_t max_height; -}; - -struct mdp5_pipe_block { - MDP5_SUB_BLOCK_DEFINITION; - uint32_t caps; /* pipe capabilities */ -}; - -struct mdp5_ctl_block { - MDP5_SUB_BLOCK_DEFINITION; - uint32_t flush_hw_mask; /* FLUSH register's hardware mask */ -}; - -struct mdp5_smp_block { - int mmb_count; /* number of SMP MMBs */ - int mmb_size; /* MMB: size in bytes */ - uint32_t clients[MAX_CLIENTS]; /* SMP port allocation /pipe */ - mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */ - uint8_t reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */ -}; - -struct mdp5_mdp_block { - MDP5_SUB_BLOCK_DEFINITION; - uint32_t caps; /* MDP capabilities: MDP_CAP_xxx bits */ -}; - -#define MDP5_INTF_NUM_MAX 5 - -struct mdp5_intf_block { - uint32_t base[MAX_BASES]; - u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */ -}; - -struct mdp5_cfg_hw { - char *name; - - struct mdp5_mdp_block mdp; - struct mdp5_smp_block smp; - struct mdp5_ctl_block ctl; - struct mdp5_pipe_block pipe_vig; - struct mdp5_pipe_block pipe_rgb; - struct mdp5_pipe_block pipe_dma; - struct mdp5_pipe_block pipe_cursor; - struct mdp5_lm_block lm; - struct mdp5_sub_block dspp; - struct mdp5_sub_block ad; - struct mdp5_sub_block pp; - struct mdp5_sub_block dsc; - struct mdp5_sub_block cdm; - struct mdp5_intf_block intf; - - uint32_t max_clk; -}; - -/* platform config data (ie. from DT, or pdata) */ -struct mdp5_cfg_platform { - struct iommu_domain *iommu; -}; - -struct mdp5_cfg { - const struct mdp5_cfg_hw *hw; - struct mdp5_cfg_platform platform; -}; - -struct mdp5_kms; -struct mdp5_cfg_handler; - -const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd); -struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd); -int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd); - -#define mdp5_cfg_intf_is_virtual(intf_type) ({ \ - typeof(intf_type) __val = (intf_type); \ - (__val) >= INTF_VIRTUAL ? true : false; }) - -struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, - uint32_t major, uint32_t minor); -void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd); - -#endif /* __MDP5_CFG_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c deleted file mode 100644 index 1abc7f5c345c..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright (c) 2015, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include - -#include "mdp5_kms.h" - -static struct mdp5_kms *get_kms(struct drm_encoder *encoder) -{ - struct msm_drm_private *priv = encoder->dev->dev_private; - return to_mdp5_kms(to_mdp_kms(priv->kms)); -} - -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include -#include -#include - -static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) -{ - if (mdp5_cmd_enc->bsc) { - DBG("set bus scaling: %d", idx); - /* HACK: scaling down, and then immediately back up - * seems to leave things broken (underflow).. so - * never disable: - */ - idx = 1; - msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx); - } -} -#else -static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {} -#endif - -#define VSYNC_CLK_RATE 19200000 -static int pingpong_tearcheck_setup(struct drm_encoder *encoder, - struct drm_display_mode *mode) -{ - struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct device *dev = encoder->dev->dev; - u32 total_lines_x100, vclks_line, cfg; - long vsync_clk_speed; - struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); - int pp_id = mixer->pp; - - if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) { - dev_err(dev, "vsync_clk is not initialized\n"); - return -EINVAL; - } - - total_lines_x100 = mode->vtotal * mode->vrefresh; - if (!total_lines_x100) { - dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n", - __func__, mode->vtotal, mode->vrefresh); - return -EINVAL; - } - - vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE); - if (vsync_clk_speed <= 0) { - dev_err(dev, "vsync_clk round rate failed %ld\n", - vsync_clk_speed); - return -EINVAL; - } - vclks_line = vsync_clk_speed * 100 / total_lines_x100; - - cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN - | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN; - cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line); - - mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg); - mdp5_write(mdp5_kms, - REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0); - mdp5_write(mdp5_kms, - REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay); - mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1); - mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay); - mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id), - MDP5_PP_SYNC_THRESH_START(4) | - MDP5_PP_SYNC_THRESH_CONTINUE(4)); - - return 0; -} - -static int pingpong_tearcheck_enable(struct drm_encoder *encoder) -{ - struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); - int pp_id = mixer->pp; - int ret; - - ret = clk_set_rate(mdp5_kms->vsync_clk, - clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE)); - if (ret) { - dev_err(encoder->dev->dev, - "vsync_clk clk_set_rate failed, %d\n", ret); - return ret; - } - ret = clk_prepare_enable(mdp5_kms->vsync_clk); - if (ret) { - dev_err(encoder->dev->dev, - "vsync_clk clk_prepare_enable failed, %d\n", ret); - return ret; - } - - mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1); - - return 0; -} - -static void pingpong_tearcheck_disable(struct drm_encoder *encoder) -{ - struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); - int pp_id = mixer->pp; - - mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0); - 
clk_disable_unprepare(mdp5_kms->vsync_clk); -} - -void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - mode = adjusted_mode; - - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - mode->base.id, mode->name, - mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, - mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, - mode->vsync_end, mode->vtotal, - mode->type, mode->flags); - pingpong_tearcheck_setup(encoder, mode); - mdp5_crtc_set_pipeline(encoder->crtc); -} - -void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) -{ - struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); - struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; - struct mdp5_interface *intf = mdp5_cmd_enc->intf; - struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); - - if (WARN_ON(!mdp5_cmd_enc->enabled)) - return; - - pingpong_tearcheck_disable(encoder); - - mdp5_ctl_set_encoder_state(ctl, pipeline, false); - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); - - bs_set(mdp5_cmd_enc, 0); - - mdp5_cmd_enc->enabled = false; -} - -void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) -{ - struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); - struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; - struct mdp5_interface *intf = mdp5_cmd_enc->intf; - struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); - - if (WARN_ON(mdp5_cmd_enc->enabled)) - return; - - bs_set(mdp5_cmd_enc, 1); - if (pingpong_tearcheck_enable(encoder)) - return; - - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); - - mdp5_ctl_set_encoder_state(ctl, pipeline, true); - - mdp5_cmd_enc->enabled = true; -} - -int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder) -{ - struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); - struct mdp5_kms *mdp5_kms; - struct device *dev; - int intf_num; - u32 data = 0; - - if (!encoder || !slave_encoder) - return -EINVAL; - - mdp5_kms = get_kms(encoder); - intf_num = mdp5_cmd_enc->intf->num; - - /* Switch slave encoder's trigger MUX, to use the master's - * start signal for the slave encoder - */ - if (intf_num == 1) - data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; - else if (intf_num == 2) - data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; - else - return -EINVAL; - - /* Smart Panel, Sync mode */ - data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL; - - dev = &mdp5_kms->pdev->dev; - - /* Make sure clocks are on when connectors calling this function. */ - pm_runtime_get_sync(dev); - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data); - - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, - MDP5_SPLIT_DPL_LOWER_SMART_PANEL); - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); - pm_runtime_put_sync(dev); - - return 0; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c deleted file mode 100644 index 8c5ed0b59e46..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ /dev/null @@ -1,1194 +0,0 @@ -/* - * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include -#include -#include -#include -#include - -#include "mdp5_kms.h" - -#define CURSOR_WIDTH 64 -#define CURSOR_HEIGHT 64 - -struct mdp5_crtc { - struct drm_crtc base; - int id; - bool enabled; - - spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */ - - /* if there is a pending flip, these will be non-null: */ - struct drm_pending_vblank_event *event; - - /* Bits have been flushed at the last commit, - * used to decide if a vsync has happened since last commit. - */ - u32 flushed_mask; - -#define PENDING_CURSOR 0x1 -#define PENDING_FLIP 0x2 - atomic_t pending; - - /* for unref'ing cursor bo's after scanout completes: */ - struct drm_flip_work unref_cursor_work; - - struct mdp_irq vblank; - struct mdp_irq err; - struct mdp_irq pp_done; - - struct completion pp_completion; - - bool lm_cursor_enabled; - - struct { - /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/ - spinlock_t lock; - - /* current cursor being scanned out: */ - struct drm_gem_object *scanout_bo; - uint64_t iova; - uint32_t width, height; - uint32_t x, y; - } cursor; -}; -#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) - -static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc); - -static struct mdp5_kms *get_kms(struct drm_crtc *crtc) -{ - struct msm_drm_private *priv = crtc->dev->dev_private; - return to_mdp5_kms(to_mdp_kms(priv->kms)); -} - -static void request_pending(struct drm_crtc *crtc, uint32_t pending) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - - atomic_or(pending, &mdp5_crtc->pending); - mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); -} - -static void request_pp_done_pending(struct drm_crtc *crtc) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - reinit_completion(&mdp5_crtc->pp_completion); -} - -static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) -{ - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_ctl *ctl = mdp5_cstate->ctl; - struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; - - DBG("%s: flush=%08x", crtc->name, flush_mask); - return mdp5_ctl_commit(ctl, pipeline, flush_mask); -} - -/* - * flush updates, to make sure hw is updated to new scanout fb, - * so that we can safely queue unref to current fb (ie. next - * vblank we know hw is done w/ previous scanout_fb). 
- */ -static u32 crtc_flush_all(struct drm_crtc *crtc) -{ - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_hw_mixer *mixer, *r_mixer; - struct drm_plane *plane; - uint32_t flush_mask = 0; - - /* this should not happen: */ - if (WARN_ON(!mdp5_cstate->ctl)) - return 0; - - drm_atomic_crtc_for_each_plane(plane, crtc) { - if (!plane->state->visible) - continue; - flush_mask |= mdp5_plane_get_flush(plane); - } - - mixer = mdp5_cstate->pipeline.mixer; - flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm); - - r_mixer = mdp5_cstate->pipeline.r_mixer; - if (r_mixer) - flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); - - return crtc_flush(crtc, flush_mask); -} - -/* if file!=NULL, this is preclose potential cancel-flip path */ -static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) -{ - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_ctl *ctl = mdp5_cstate->ctl; - struct drm_device *dev = crtc->dev; - struct drm_pending_vblank_event *event; - unsigned long flags; - - spin_lock_irqsave(&dev->event_lock, flags); - event = mdp5_crtc->event; - if (event) { - mdp5_crtc->event = NULL; - DBG("%s: send event: %p", crtc->name, event); - drm_crtc_send_vblank_event(crtc, event); - } - spin_unlock_irqrestore(&dev->event_lock, flags); - - if (ctl && !crtc->state->enable) { - /* set STAGE_UNUSED for all layers */ - mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0); - /* XXX: What to do here? */ - /* mdp5_crtc->ctl = NULL; */ - } -} - -static void unref_cursor_worker(struct drm_flip_work *work, void *val) -{ - struct mdp5_crtc *mdp5_crtc = - container_of(work, struct mdp5_crtc, unref_cursor_work); - struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base); - struct msm_kms *kms = &mdp5_kms->base.base; - - msm_gem_put_iova(val, kms->aspace); - drm_gem_object_put_unlocked(val); -} - -static void mdp5_crtc_destroy(struct drm_crtc *crtc) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - - drm_crtc_cleanup(crtc); - drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work); - - kfree(mdp5_crtc); -} - -static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage) -{ - switch (stage) { - case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA; - case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA; - case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA; - case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA; - case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA; - case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA; - case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA; - default: - return 0; - } -} - -/* - * left/right pipe offsets for the stage array used in blend_setup() - */ -#define PIPE_LEFT 0 -#define PIPE_RIGHT 1 - -/* - * blend_setup() - blend all the planes of a CRTC - * - * If no base layer is available, border will be enabled as the base layer. - * Otherwise all layers will be blended based on their stage calculated - * in mdp5_crtc_atomic_check. 
- */ -static void blend_setup(struct drm_crtc *crtc) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; - struct mdp5_kms *mdp5_kms = get_kms(crtc); - struct drm_plane *plane; - const struct mdp5_cfg_hw *hw_cfg; - struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL}; - const struct mdp_format *format; - struct mdp5_hw_mixer *mixer = pipeline->mixer; - uint32_t lm = mixer->lm; - struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; - uint32_t r_lm = r_mixer ? r_mixer->lm : 0; - struct mdp5_ctl *ctl = mdp5_cstate->ctl; - uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; - unsigned long flags; - enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; - enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; - int i, plane_cnt = 0; - bool bg_alpha_enabled = false; - u32 mixer_op_mode = 0; - u32 val; -#define blender(stage) ((stage) - STAGE0) - - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - - spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); - - /* ctl could be released already when we are shutting down: */ - /* XXX: Can this happen now? */ - if (!ctl) - goto out; - - /* Collect all plane information */ - drm_atomic_crtc_for_each_plane(plane, crtc) { - enum mdp5_pipe right_pipe; - - if (!plane->state->visible) - continue; - - pstate = to_mdp5_plane_state(plane->state); - pstates[pstate->stage] = pstate; - stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane); - /* - * if we have a right mixer, stage the same pipe as we - * have on the left mixer - */ - if (r_mixer) - r_stage[pstate->stage][PIPE_LEFT] = - mdp5_plane_pipe(plane); - /* - * if we have a right pipe (i.e, the plane comprises of 2 - * hwpipes, then stage the right pipe on the right side of both - * the layer mixers - */ - right_pipe = mdp5_plane_right_pipe(plane); - if (right_pipe) { - stage[pstate->stage][PIPE_RIGHT] = right_pipe; - r_stage[pstate->stage][PIPE_RIGHT] = right_pipe; - } - - plane_cnt++; - } - - if (!pstates[STAGE_BASE]) { - ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; - DBG("Border Color is enabled"); - } else if (plane_cnt) { - format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb)); - - if (format->alpha_enable) - bg_alpha_enabled = true; - } - - /* The reset for blending */ - for (i = STAGE0; i <= STAGE_MAX; i++) { - if (!pstates[i]) - continue; - - format = to_mdp_format( - msm_framebuffer_format(pstates[i]->base.fb)); - plane = pstates[i]->base.plane; - blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | - MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST); - fg_alpha = pstates[i]->alpha; - bg_alpha = 0xFF - pstates[i]->alpha; - - if (!format->alpha_enable && bg_alpha_enabled) - mixer_op_mode = 0; - else - mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i); - - DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha); - - if (format->alpha_enable && pstates[i]->premultiplied) { - blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | - MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL); - if (fg_alpha != 0xff) { - bg_alpha = fg_alpha; - blend_op |= - MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA | - MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA; - } else { - blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA; - } - } else if (format->alpha_enable) { - blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) | - MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL); - if (fg_alpha != 0xff) { - bg_alpha = fg_alpha; - blend_op |= - 
MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA | - MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA | - MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA | - MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA; - } else { - blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA; - } - } - - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm, - blender(i)), blend_op); - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm, - blender(i)), fg_alpha); - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm, - blender(i)), bg_alpha); - if (r_mixer) { - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm, - blender(i)), blend_op); - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm, - blender(i)), fg_alpha); - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm, - blender(i)), bg_alpha); - } - } - - val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm)); - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), - val | mixer_op_mode); - if (r_mixer) { - val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm)); - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), - val | mixer_op_mode); - } - - mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt, - ctl_blend_flags); -out: - spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); -} - -static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_kms *mdp5_kms = get_kms(crtc); - struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer; - struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer; - uint32_t lm = mixer->lm; - u32 mixer_width, val; - unsigned long flags; - struct drm_display_mode *mode; - - if (WARN_ON(!crtc->state)) - return; - - mode = &crtc->state->adjusted_mode; - - DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - crtc->name, mode->base.id, mode->name, - mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, - mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, - mode->vsync_end, mode->vtotal, - mode->type, mode->flags); - - mixer_width = mode->hdisplay; - if (r_mixer) - mixer_width /= 2; - - spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm), - MDP5_LM_OUT_SIZE_WIDTH(mixer_width) | - MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); - - /* Assign mixer to LEFT side in source split mode */ - val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm)); - val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT; - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val); - - if (r_mixer) { - u32 r_lm = r_mixer->lm; - - mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm), - MDP5_LM_OUT_SIZE_WIDTH(mixer_width) | - MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); - - /* Assign mixer to RIGHT side in source split mode */ - val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm)); - val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT; - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val); - } - - spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); -} - -static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_kms *mdp5_kms = get_kms(crtc); - struct device *dev = &mdp5_kms->pdev->dev; - - DBG("%s", crtc->name); - - if (WARN_ON(!mdp5_crtc->enabled)) - return; - - /* Disable/save vblank irq handling before power is disabled */ - drm_crtc_vblank_off(crtc); - - if (mdp5_cstate->cmd_mode) - 
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done); - - mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); - pm_runtime_put_sync(dev); - - mdp5_crtc->enabled = false; -} - -static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_kms *mdp5_kms = get_kms(crtc); - struct device *dev = &mdp5_kms->pdev->dev; - - DBG("%s", crtc->name); - - if (WARN_ON(mdp5_crtc->enabled)) - return; - - pm_runtime_get_sync(dev); - - if (mdp5_crtc->lm_cursor_enabled) { - /* - * Restore LM cursor state, as it might have been lost - * with suspend: - */ - if (mdp5_crtc->cursor.iova) { - unsigned long flags; - - spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); - mdp5_crtc_restore_cursor(crtc); - spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); - - mdp5_ctl_set_cursor(mdp5_cstate->ctl, - &mdp5_cstate->pipeline, 0, true); - } else { - mdp5_ctl_set_cursor(mdp5_cstate->ctl, - &mdp5_cstate->pipeline, 0, false); - } - } - - /* Restore vblank irq handling after power is enabled */ - drm_crtc_vblank_on(crtc); - - mdp5_crtc_mode_set_nofb(crtc); - - mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); - - if (mdp5_cstate->cmd_mode) - mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done); - - mdp5_crtc->enabled = true; -} - -int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc, - struct drm_crtc_state *new_crtc_state, - bool need_right_mixer) -{ - struct mdp5_crtc_state *mdp5_cstate = - to_mdp5_crtc_state(new_crtc_state); - struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; - struct mdp5_interface *intf; - bool new_mixer = false; - - new_mixer = !pipeline->mixer; - - if ((need_right_mixer && !pipeline->r_mixer) || - (!need_right_mixer && pipeline->r_mixer)) - new_mixer = true; - - if (new_mixer) { - struct mdp5_hw_mixer *old_mixer = pipeline->mixer; - struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer; - u32 caps; - int ret; - - caps = MDP_LM_CAP_DISPLAY; - if (need_right_mixer) - caps |= MDP_LM_CAP_PAIR; - - ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps, - &pipeline->mixer, need_right_mixer ? - &pipeline->r_mixer : NULL); - if (ret) - return ret; - - mdp5_mixer_release(new_crtc_state->state, old_mixer); - if (old_r_mixer) { - mdp5_mixer_release(new_crtc_state->state, old_r_mixer); - if (!need_right_mixer) - pipeline->r_mixer = NULL; - } - } - - /* - * these should have been already set up in the encoder's atomic - * check (called by drm_atomic_helper_check_modeset) - */ - intf = pipeline->intf; - - mdp5_cstate->err_irqmask = intf2err(intf->num); - mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf); - - if ((intf->type == INTF_DSI) && - (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) { - mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer); - mdp5_cstate->cmd_mode = true; - } else { - mdp5_cstate->pp_done_irqmask = 0; - mdp5_cstate->cmd_mode = false; - } - - return 0; -} - -struct plane_state { - struct drm_plane *plane; - struct mdp5_plane_state *state; -}; - -static int pstate_cmp(const void *a, const void *b) -{ - struct plane_state *pa = (struct plane_state *)a; - struct plane_state *pb = (struct plane_state *)b; - return pa->state->zpos - pb->state->zpos; -} - -/* is there a helper for this? 
*/ -static bool is_fullscreen(struct drm_crtc_state *cstate, - struct drm_plane_state *pstate) -{ - return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) && - ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) && - ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); -} - -static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc, - struct drm_crtc_state *new_crtc_state, - struct drm_plane_state *bpstate) -{ - struct mdp5_crtc_state *mdp5_cstate = - to_mdp5_crtc_state(new_crtc_state); - - /* - * if we're in source split mode, it's mandatory to have - * border out on the base stage - */ - if (mdp5_cstate->pipeline.r_mixer) - return STAGE0; - - /* if the bottom-most layer is not fullscreen, we need to use - * it for solid-color: - */ - if (!is_fullscreen(new_crtc_state, bpstate)) - return STAGE0; - - return STAGE_BASE; -} - -static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - struct mdp5_kms *mdp5_kms = get_kms(crtc); - struct drm_plane *plane; - struct drm_device *dev = crtc->dev; - struct plane_state pstates[STAGE_MAX + 1]; - const struct mdp5_cfg_hw *hw_cfg; - const struct drm_plane_state *pstate; - const struct drm_display_mode *mode = &state->adjusted_mode; - bool cursor_plane = false; - bool need_right_mixer = false; - int cnt = 0, i; - int ret; - enum mdp_mixer_stage_id start; - - DBG("%s: check", crtc->name); - - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { - if (!pstate->visible) - continue; - - pstates[cnt].plane = plane; - pstates[cnt].state = to_mdp5_plane_state(pstate); - - /* - * if any plane on this crtc uses 2 hwpipes, then we need - * the crtc to have a right hwmixer. - */ - if (pstates[cnt].state->r_hwpipe) - need_right_mixer = true; - cnt++; - - if (plane->type == DRM_PLANE_TYPE_CURSOR) - cursor_plane = true; - } - - /* bail out early if there aren't any planes */ - if (!cnt) - return 0; - - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - - /* - * we need a right hwmixer if the mode's width is greater than a single - * LM's max width - */ - if (mode->hdisplay > hw_cfg->lm.max_width) - need_right_mixer = true; - - ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer); - if (ret) { - dev_err(dev->dev, "couldn't assign mixers %d\n", ret); - return ret; - } - - /* assign a stage based on sorted zpos property */ - sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); - - /* trigger a warning if cursor isn't the highest zorder */ - WARN_ON(cursor_plane && - (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR)); - - start = get_start_stage(crtc, state, &pstates[0].state->base); - - /* verify that there are not too many planes attached to crtc - * and that we don't have conflicting mixer stages: - */ - if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) { - dev_err(dev->dev, "too many planes! 
cnt=%d, start stage=%d\n", - cnt, start); - return -EINVAL; - } - - for (i = 0; i < cnt; i++) { - if (cursor_plane && (i == (cnt - 1))) - pstates[i].state->stage = hw_cfg->lm.nb_stages; - else - pstates[i].state->stage = start + i; - DBG("%s: assign pipe %s on stage=%d", crtc->name, - pstates[i].plane->name, - pstates[i].state->stage); - } - - return 0; -} - -static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ - DBG("%s: begin", crtc->name); -} - -static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct drm_device *dev = crtc->dev; - unsigned long flags; - - DBG("%s: event: %p", crtc->name, crtc->state->event); - - WARN_ON(mdp5_crtc->event); - - spin_lock_irqsave(&dev->event_lock, flags); - mdp5_crtc->event = crtc->state->event; - spin_unlock_irqrestore(&dev->event_lock, flags); - - /* - * If no CTL has been allocated in mdp5_crtc_atomic_check(), - * it means we are trying to flush a CRTC whose state is disabled: - * nothing else needs to be done. - */ - /* XXX: Can this happen now ? */ - if (unlikely(!mdp5_cstate->ctl)) - return; - - blend_setup(crtc); - - /* PP_DONE irq is only used by command mode for now. - * It is better to request pending before FLUSH and START trigger - * to make sure no pp_done irq missed. - * This is safe because no pp_done will happen before SW trigger - * in command mode. - */ - if (mdp5_cstate->cmd_mode) - request_pp_done_pending(crtc); - - mdp5_crtc->flushed_mask = crtc_flush_all(crtc); - - /* XXX are we leaking out state here? */ - mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask; - mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask; - mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask; - - request_pending(crtc, PENDING_FLIP); -} - -static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - uint32_t xres = crtc->mode.hdisplay; - uint32_t yres = crtc->mode.vdisplay; - - /* - * Cursor Region Of Interest (ROI) is a plane read from cursor - * buffer to render. The ROI region is determined by the visibility of - * the cursor point. In the default Cursor image the cursor point will - * be at the top left of the cursor image, unless it is specified - * otherwise using hotspot feature. - * - * If the cursor point reaches the right (xres - x < cursor.width) or - * bottom (yres - y < cursor.height) boundary of the screen, then ROI - * width and ROI height need to be evaluated to crop the cursor image - * accordingly. 
- * (xres-x) will be new cursor width when x > (xres - cursor.width) - * (yres-y) will be new cursor height when y > (yres - cursor.height) - */ - *roi_w = min(mdp5_crtc->cursor.width, xres - - mdp5_crtc->cursor.x); - *roi_h = min(mdp5_crtc->cursor.height, yres - - mdp5_crtc->cursor.y); -} - -static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc) -{ - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_kms *mdp5_kms = get_kms(crtc); - const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; - uint32_t blendcfg, stride; - uint32_t x, y, width, height; - uint32_t roi_w, roi_h; - int lm; - - assert_spin_locked(&mdp5_crtc->cursor.lock); - - lm = mdp5_cstate->pipeline.mixer->lm; - - x = mdp5_crtc->cursor.x; - y = mdp5_crtc->cursor.y; - width = mdp5_crtc->cursor.width; - height = mdp5_crtc->cursor.height; - - stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0); - - get_roi(crtc, &roi_w, &roi_h); - - mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); - mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), - MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); - mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm), - MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | - MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); - mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), - MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | - MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); - mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm), - MDP5_LM_CURSOR_START_XY_Y_START(y) | - MDP5_LM_CURSOR_START_XY_X_START(x)); - mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), - mdp5_crtc->cursor.iova); - - blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; - blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); - mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); -} - -static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file, uint32_t handle, - uint32_t width, uint32_t height) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; - struct drm_device *dev = crtc->dev; - struct mdp5_kms *mdp5_kms = get_kms(crtc); - struct platform_device *pdev = mdp5_kms->pdev; - struct msm_kms *kms = &mdp5_kms->base.base; - struct drm_gem_object *cursor_bo, *old_bo = NULL; - struct mdp5_ctl *ctl; - int ret; - uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); - bool cursor_enable = true; - unsigned long flags; - - if (!mdp5_crtc->lm_cursor_enabled) { - dev_warn(dev->dev, - "cursor_set is deprecated with cursor planes\n"); - return -EINVAL; - } - - if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { - dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height); - return -EINVAL; - } - - ctl = mdp5_cstate->ctl; - if (!ctl) - return -EINVAL; - - /* don't support LM cursors when we we have source split enabled */ - if (mdp5_cstate->pipeline.r_mixer) - return -EINVAL; - - if (!handle) { - DBG("Cursor off"); - cursor_enable = false; - mdp5_crtc->cursor.iova = 0; - pm_runtime_get_sync(&pdev->dev); - goto set_cursor; - } - - cursor_bo = drm_gem_object_lookup(file, handle); - if (!cursor_bo) - return -ENOENT; - - ret = msm_gem_get_iova(cursor_bo, kms->aspace, - &mdp5_crtc->cursor.iova); - if (ret) - return -EINVAL; - - pm_runtime_get_sync(&pdev->dev); - - spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); - old_bo = mdp5_crtc->cursor.scanout_bo; - - mdp5_crtc->cursor.scanout_bo = cursor_bo; 
- mdp5_crtc->cursor.width = width; - mdp5_crtc->cursor.height = height; - - mdp5_crtc_restore_cursor(crtc); - - spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); - -set_cursor: - ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); - if (ret) { - dev_err(dev->dev, "failed to %sable cursor: %d\n", - cursor_enable ? "en" : "dis", ret); - goto end; - } - - crtc_flush(crtc, flush_mask); - -end: - pm_runtime_put_sync(&pdev->dev); - if (old_bo) { - drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); - /* enable vblank to complete cursor work: */ - request_pending(crtc, PENDING_CURSOR); - } - return ret; -} - -static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) -{ - struct mdp5_kms *mdp5_kms = get_kms(crtc); - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); - struct drm_device *dev = crtc->dev; - uint32_t roi_w; - uint32_t roi_h; - unsigned long flags; - - if (!mdp5_crtc->lm_cursor_enabled) { - dev_warn(dev->dev, - "cursor_move is deprecated with cursor planes\n"); - return -EINVAL; - } - - /* don't support LM cursors when we we have source split enabled */ - if (mdp5_cstate->pipeline.r_mixer) - return -EINVAL; - - /* In case the CRTC is disabled, just drop the cursor update */ - if (unlikely(!crtc->state->enable)) - return 0; - - mdp5_crtc->cursor.x = x = max(x, 0); - mdp5_crtc->cursor.y = y = max(y, 0); - - get_roi(crtc, &roi_w, &roi_h); - - pm_runtime_get_sync(&mdp5_kms->pdev->dev); - - spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); - mdp5_crtc_restore_cursor(crtc); - spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); - - crtc_flush(crtc, flush_mask); - - pm_runtime_put_sync(&mdp5_kms->pdev->dev); - - return 0; -} - -static void -mdp5_crtc_atomic_print_state(struct drm_printer *p, - const struct drm_crtc_state *state) -{ - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state); - struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; - struct mdp5_kms *mdp5_kms = get_kms(state->crtc); - - if (WARN_ON(!pipeline)) - return; - - drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ? - pipeline->mixer->name : "(null)"); - - if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT) - drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ? 
- pipeline->r_mixer->name : "(null)"); -} - -static void mdp5_crtc_reset(struct drm_crtc *crtc) -{ - struct mdp5_crtc_state *mdp5_cstate; - - if (crtc->state) { - __drm_atomic_helper_crtc_destroy_state(crtc->state); - kfree(to_mdp5_crtc_state(crtc->state)); - } - - mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL); - - if (mdp5_cstate) { - mdp5_cstate->base.crtc = crtc; - crtc->state = &mdp5_cstate->base; - } -} - -static struct drm_crtc_state * -mdp5_crtc_duplicate_state(struct drm_crtc *crtc) -{ - struct mdp5_crtc_state *mdp5_cstate; - - if (WARN_ON(!crtc->state)) - return NULL; - - mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state), - sizeof(*mdp5_cstate), GFP_KERNEL); - if (!mdp5_cstate) - return NULL; - - __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base); - - return &mdp5_cstate->base; -} - -static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) -{ - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state); - - __drm_atomic_helper_crtc_destroy_state(state); - - kfree(mdp5_cstate); -} - -static const struct drm_crtc_funcs mdp5_crtc_funcs = { - .set_config = drm_atomic_helper_set_config, - .destroy = mdp5_crtc_destroy, - .page_flip = drm_atomic_helper_page_flip, - .reset = mdp5_crtc_reset, - .atomic_duplicate_state = mdp5_crtc_duplicate_state, - .atomic_destroy_state = mdp5_crtc_destroy_state, - .cursor_set = mdp5_crtc_cursor_set, - .cursor_move = mdp5_crtc_cursor_move, - .atomic_print_state = mdp5_crtc_atomic_print_state, -}; - -static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { - .mode_set_nofb = mdp5_crtc_mode_set_nofb, - .atomic_check = mdp5_crtc_atomic_check, - .atomic_begin = mdp5_crtc_atomic_begin, - .atomic_flush = mdp5_crtc_atomic_flush, - .atomic_enable = mdp5_crtc_atomic_enable, - .atomic_disable = mdp5_crtc_atomic_disable, -}; - -static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) -{ - struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank); - struct drm_crtc *crtc = &mdp5_crtc->base; - struct msm_drm_private *priv = crtc->dev->dev_private; - unsigned pending; - - mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank); - - pending = atomic_xchg(&mdp5_crtc->pending, 0); - - if (pending & PENDING_FLIP) { - complete_flip(crtc, NULL); - } - - if (pending & PENDING_CURSOR) - drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq); -} - -static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) -{ - struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err); - - DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus); -} - -static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus) -{ - struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, - pp_done); - - complete(&mdp5_crtc->pp_completion); -} - -static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - int ret; - - ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, - msecs_to_jiffies(50)); - if (ret == 0) - dev_warn(dev->dev, "pp done time out, lm=%d\n", - mdp5_cstate->pipeline.mixer->lm); -} - -static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_ctl *ctl = mdp5_cstate->ctl; 
- int ret; - - /* Should not call this function if crtc is disabled. */ - if (!ctl) - return; - - ret = drm_crtc_vblank_get(crtc); - if (ret) - return; - - ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, - ((mdp5_ctl_get_commit_status(ctl) & - mdp5_crtc->flushed_mask) == 0), - msecs_to_jiffies(50)); - if (ret <= 0) - dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id); - - mdp5_crtc->flushed_mask = 0; - - drm_crtc_vblank_put(crtc); -} - -uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - return mdp5_crtc->vblank.irqmask; -} - -void mdp5_crtc_set_pipeline(struct drm_crtc *crtc) -{ - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - struct mdp5_kms *mdp5_kms = get_kms(crtc); - - /* should this be done elsewhere ? */ - mdp_irq_update(&mdp5_kms->base); - - mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline); -} - -struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) -{ - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - - return mdp5_cstate->ctl; -} - -struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc) -{ - struct mdp5_crtc_state *mdp5_cstate; - - if (WARN_ON(!crtc)) - return ERR_PTR(-EINVAL); - - mdp5_cstate = to_mdp5_crtc_state(crtc->state); - - return WARN_ON(!mdp5_cstate->pipeline.mixer) ? - ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer; -} - -struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc) -{ - struct mdp5_crtc_state *mdp5_cstate; - - if (WARN_ON(!crtc)) - return ERR_PTR(-EINVAL); - - mdp5_cstate = to_mdp5_crtc_state(crtc->state); - - return &mdp5_cstate->pipeline; -} - -void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) -{ - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - - if (mdp5_cstate->cmd_mode) - mdp5_crtc_wait_for_pp_done(crtc); - else - mdp5_crtc_wait_for_flush_done(crtc); -} - -/* initialize crtc */ -struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, - struct drm_plane *plane, - struct drm_plane *cursor_plane, int id) -{ - struct drm_crtc *crtc = NULL; - struct mdp5_crtc *mdp5_crtc; - - mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL); - if (!mdp5_crtc) - return ERR_PTR(-ENOMEM); - - crtc = &mdp5_crtc->base; - - mdp5_crtc->id = id; - - spin_lock_init(&mdp5_crtc->lm_lock); - spin_lock_init(&mdp5_crtc->cursor.lock); - init_completion(&mdp5_crtc->pp_completion); - - mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; - mdp5_crtc->err.irq = mdp5_crtc_err_irq; - mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq; - - mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true; - - drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, - &mdp5_crtc_funcs, NULL); - - drm_flip_work_init(&mdp5_crtc->unref_cursor_work, - "unref cursor", unref_cursor_worker); - - drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); - plane->crtc = crtc; - - return crtc; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c deleted file mode 100644 index 439e0a300e25..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ /dev/null @@ -1,779 +0,0 @@ -/* - * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "mdp5_kms.h" -#include "mdp5_ctl.h" - -/* - * CTL - MDP Control Pool Manager - * - * Controls are shared between all display interfaces. - * - * They are intended to be used for data path configuration. - * The top level register programming describes the complete data path for - * a specific data path ID - REG_MDP5_CTL_*(, ...) - * - * Hardware capabilities determine the number of concurrent data paths - * - * In certain use cases (high-resolution dual pipe), one single CTL can be - * shared across multiple CRTCs. - */ - -#define CTL_STAT_BUSY 0x1 -#define CTL_STAT_BOOKED 0x2 - -struct mdp5_ctl { - struct mdp5_ctl_manager *ctlm; - - u32 id; - - /* CTL status bitmask */ - u32 status; - - bool encoder_enabled; - uint32_t start_mask; - - /* REG_MDP5_CTL_*() registers access info + lock: */ - spinlock_t hw_lock; - u32 reg_offset; - - /* when do CTL registers need to be flushed? (mask of trigger bits) */ - u32 pending_ctl_trigger; - - bool cursor_on; - - /* True if the current CTL has FLUSH bits pending for single FLUSH. */ - bool flush_pending; - - struct mdp5_ctl *pair; /* Paired CTL to be flushed together */ -}; - -struct mdp5_ctl_manager { - struct drm_device *dev; - - /* number of CTL / Layer Mixers in this hw config: */ - u32 nlm; - u32 nctl; - - /* to filter out non-present bits in the current hardware config */ - u32 flush_hw_mask; - - /* status for single FLUSH */ - bool single_flush_supported; - u32 single_flush_pending_mask; - - /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ - spinlock_t pool_lock; - struct mdp5_ctl ctls[MAX_CTL]; -}; - -static inline -struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr) -{ - struct msm_drm_private *priv = ctl_mgr->dev->dev_private; - - return to_mdp5_kms(to_mdp_kms(priv->kms)); -} - -static inline -void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data) -{ - struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); - - (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ - mdp5_write(mdp5_kms, reg, data); -} - -static inline -u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) -{ - struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); - - (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ - return mdp5_read(mdp5_kms, reg); -} - -static void set_display_intf(struct mdp5_kms *mdp5_kms, - struct mdp5_interface *intf) -{ - unsigned long flags; - u32 intf_sel; - - spin_lock_irqsave(&mdp5_kms->resource_lock, flags); - intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); - - switch (intf->num) { - case 0: - intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK; - intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type); - break; - case 1: - intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK; - intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type); - break; - case 2: - intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK; - intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type); - break; - case 3: - intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK; - intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type); - break; - default: - BUG(); - break; - } - - mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); - spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); -} - -static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) -{ - unsigned long flags; - struct mdp5_interface *intf = 
pipeline->intf; - u32 ctl_op = 0; - - if (!mdp5_cfg_intf_is_virtual(intf->type)) - ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num); - - switch (intf->type) { - case INTF_DSI: - if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) - ctl_op |= MDP5_CTL_OP_CMD_MODE; - break; - - case INTF_WB: - if (intf->mode == MDP5_INTF_WB_MODE_LINE) - ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE); - break; - - default: - break; - } - - if (pipeline->r_mixer) - ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE | - MDP5_CTL_OP_PACK_3D(1); - - spin_lock_irqsave(&ctl->hw_lock, flags); - ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op); - spin_unlock_irqrestore(&ctl->hw_lock, flags); -} - -int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) -{ - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); - struct mdp5_interface *intf = pipeline->intf; - struct mdp5_hw_mixer *mixer = pipeline->mixer; - struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; - - ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) | - mdp_ctl_flush_mask_encoder(intf); - if (r_mixer) - ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); - - /* Virtual interfaces need not set a display intf (e.g.: Writeback) */ - if (!mdp5_cfg_intf_is_virtual(intf->type)) - set_display_intf(mdp5_kms, intf); - - set_ctl_op(ctl, pipeline); - - return 0; -} - -static bool start_signal_needed(struct mdp5_ctl *ctl, - struct mdp5_pipeline *pipeline) -{ - struct mdp5_interface *intf = pipeline->intf; - - if (!ctl->encoder_enabled || ctl->start_mask != 0) - return false; - - switch (intf->type) { - case INTF_WB: - return true; - case INTF_DSI: - return intf->mode == MDP5_INTF_DSI_MODE_COMMAND; - default: - return false; - } -} - -/* - * send_start_signal() - Overlay Processor Start Signal - * - * For a given control operation (display pipeline), a START signal needs to be - * executed in order to kick off operation and activate all layers. - * e.g.: DSI command mode, Writeback - */ -static void send_start_signal(struct mdp5_ctl *ctl) -{ - unsigned long flags; - - spin_lock_irqsave(&ctl->hw_lock, flags); - ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1); - spin_unlock_irqrestore(&ctl->hw_lock, flags); -} - -static void refill_start_mask(struct mdp5_ctl *ctl, - struct mdp5_pipeline *pipeline) -{ - struct mdp5_interface *intf = pipeline->intf; - struct mdp5_hw_mixer *mixer = pipeline->mixer; - struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; - - ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm); - if (r_mixer) - ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); - - /* - * Writeback encoder needs to program & flush - * address registers for each page flip.. - */ - if (intf->type == INTF_WB) - ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf); -} - -/** - * mdp5_ctl_set_encoder_state() - set the encoder state - * - * @enable: true, when encoder is ready for data streaming; false, otherwise. - * - * Note: - * This encoder state is needed to trigger START signal (data path kickoff). - */ -int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, - struct mdp5_pipeline *pipeline, - bool enabled) -{ - struct mdp5_interface *intf = pipeline->intf; - - if (WARN_ON(!ctl)) - return -EINVAL; - - ctl->encoder_enabled = enabled; - DBG("intf_%d: %s", intf->num, enabled ? 
"on" : "off"); - - if (start_signal_needed(ctl, pipeline)) { - send_start_signal(ctl); - refill_start_mask(ctl, pipeline); - } - - return 0; -} - -/* - * Note: - * CTL registers need to be flushed after calling this function - * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) - */ -int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, - int cursor_id, bool enable) -{ - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - unsigned long flags; - u32 blend_cfg; - struct mdp5_hw_mixer *mixer = pipeline->mixer; - - if (unlikely(WARN_ON(!mixer))) { - dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM", - ctl->id); - return -EINVAL; - } - - if (pipeline->r_mixer) { - dev_err(ctl_mgr->dev->dev, "unsupported configuration"); - return -EINVAL; - } - - spin_lock_irqsave(&ctl->hw_lock, flags); - - blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm)); - - if (enable) - blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; - else - blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; - - ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); - ctl->cursor_on = enable; - - spin_unlock_irqrestore(&ctl->hw_lock, flags); - - ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); - - return 0; -} - -static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, - enum mdp_mixer_stage_id stage) -{ - switch (pipe) { - case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage); - case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage); - case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage); - case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage); - case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage); - case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage); - case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage); - case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); - case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); - case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); - case SSPP_CURSOR0: - case SSPP_CURSOR1: - default: return 0; - } -} - -static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, - enum mdp_mixer_stage_id stage) -{ - if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1)) - return 0; - - switch (pipe) { - case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3; - case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3; - case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3; - case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3; - case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3; - case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3; - case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3; - case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; - case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; - case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; - case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage); - case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage); - default: return 0; - } -} - -static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl) -{ - unsigned long flags; - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - int i; - - spin_lock_irqsave(&ctl->hw_lock, flags); - - for (i = 0; i < ctl_mgr->nlm; i++) { - ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0); - ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0); - } - - spin_unlock_irqrestore(&ctl->hw_lock, flags); -} - -#define PIPE_LEFT 0 -#define PIPE_RIGHT 1 -int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, - enum mdp5_pipe stage[][MAX_PIPE_STAGE], - enum mdp5_pipe 
r_stage[][MAX_PIPE_STAGE], - u32 stage_cnt, u32 ctl_blend_op_flags) -{ - struct mdp5_hw_mixer *mixer = pipeline->mixer; - struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; - unsigned long flags; - u32 blend_cfg = 0, blend_ext_cfg = 0; - u32 r_blend_cfg = 0, r_blend_ext_cfg = 0; - int i, start_stage; - - mdp5_ctl_reset_blend_regs(ctl); - - if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) { - start_stage = STAGE0; - blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; - if (r_mixer) - r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; - } else { - start_stage = STAGE_BASE; - } - - for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) { - blend_cfg |= - mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) | - mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i); - blend_ext_cfg |= - mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) | - mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i); - if (r_mixer) { - r_blend_cfg |= - mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) | - mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i); - r_blend_ext_cfg |= - mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) | - mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i); - } - } - - spin_lock_irqsave(&ctl->hw_lock, flags); - if (ctl->cursor_on) - blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; - - ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); - ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm), - blend_ext_cfg); - if (r_mixer) { - ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm), - r_blend_cfg); - ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm), - r_blend_ext_cfg); - } - spin_unlock_irqrestore(&ctl->hw_lock, flags); - - ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm); - if (r_mixer) - ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm); - - DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm, - blend_cfg, blend_ext_cfg); - if (r_mixer) - DBG("lm%d: blend config = 0x%08x. 
ext_cfg = 0x%08x", - r_mixer->lm, r_blend_cfg, r_blend_ext_cfg); - - return 0; -} - -u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf) -{ - if (intf->type == INTF_WB) - return MDP5_CTL_FLUSH_WB; - - switch (intf->num) { - case 0: return MDP5_CTL_FLUSH_TIMING_0; - case 1: return MDP5_CTL_FLUSH_TIMING_1; - case 2: return MDP5_CTL_FLUSH_TIMING_2; - case 3: return MDP5_CTL_FLUSH_TIMING_3; - default: return 0; - } -} - -u32 mdp_ctl_flush_mask_cursor(int cursor_id) -{ - switch (cursor_id) { - case 0: return MDP5_CTL_FLUSH_CURSOR_0; - case 1: return MDP5_CTL_FLUSH_CURSOR_1; - default: return 0; - } -} - -u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe) -{ - switch (pipe) { - case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0; - case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1; - case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2; - case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0; - case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1; - case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2; - case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0; - case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; - case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; - case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; - case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0; - case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1; - default: return 0; - } -} - -u32 mdp_ctl_flush_mask_lm(int lm) -{ - switch (lm) { - case 0: return MDP5_CTL_FLUSH_LM0; - case 1: return MDP5_CTL_FLUSH_LM1; - case 2: return MDP5_CTL_FLUSH_LM2; - case 5: return MDP5_CTL_FLUSH_LM5; - default: return 0; - } -} - -static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, - u32 flush_mask) -{ - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - u32 sw_mask = 0; -#define BIT_NEEDS_SW_FIX(bit) \ - (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit)) - - /* for some targets, cursor bit is the same as LM bit */ - if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0)) - sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm); - - return sw_mask; -} - -static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, - u32 *flush_id) -{ - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - - if (ctl->pair) { - DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask); - ctl->flush_pending = true; - ctl_mgr->single_flush_pending_mask |= (*flush_mask); - *flush_mask = 0; - - if (ctl->pair->flush_pending) { - *flush_id = min_t(u32, ctl->id, ctl->pair->id); - *flush_mask = ctl_mgr->single_flush_pending_mask; - - ctl->flush_pending = false; - ctl->pair->flush_pending = false; - ctl_mgr->single_flush_pending_mask = 0; - - DBG("Single FLUSH mask %x,ID %d", *flush_mask, - *flush_id); - } - } -} - -/** - * mdp5_ctl_commit() - Register Flush - * - * The flush register is used to indicate several registers are all - * programmed, and are safe to update to the back copy of the double - * buffered registers. - * - * Some registers FLUSH bits are shared when the hardware does not have - * dedicated bits for them; handling these is the job of fix_sw_flush(). - * - * CTL registers need to be flushed in some circumstances; if that is the - * case, some trigger bits will be present in both flush mask and - * ctl->pending_ctl_trigger. - * - * Return H/W flushed bit mask. 
- */ -u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, - struct mdp5_pipeline *pipeline, - u32 flush_mask) -{ - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - unsigned long flags; - u32 flush_id = ctl->id; - u32 curr_ctl_flush_mask; - - ctl->start_mask &= ~flush_mask; - - VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask, - ctl->start_mask, ctl->pending_ctl_trigger); - - if (ctl->pending_ctl_trigger & flush_mask) { - flush_mask |= MDP5_CTL_FLUSH_CTL; - ctl->pending_ctl_trigger = 0; - } - - flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask); - - flush_mask &= ctl_mgr->flush_hw_mask; - - curr_ctl_flush_mask = flush_mask; - - fix_for_single_flush(ctl, &flush_mask, &flush_id); - - if (flush_mask) { - spin_lock_irqsave(&ctl->hw_lock, flags); - ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); - spin_unlock_irqrestore(&ctl->hw_lock, flags); - } - - if (start_signal_needed(ctl, pipeline)) { - send_start_signal(ctl); - refill_start_mask(ctl, pipeline); - } - - return curr_ctl_flush_mask; -} - -u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) -{ - return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id)); -} - -int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) -{ - return WARN_ON(!ctl) ? -EINVAL : ctl->id; -} - -/* - * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH - */ -int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) -{ - struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm; - struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); - - /* do nothing silently if hw doesn't support */ - if (!ctl_mgr->single_flush_supported) - return 0; - - if (!enable) { - ctlx->pair = NULL; - ctly->pair = NULL; - mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0); - return 0; - } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { - dev_err(ctl_mgr->dev->dev, "CTLs already paired\n"); - return -EINVAL; - } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) { - dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n"); - return -EINVAL; - } - - ctlx->pair = ctly; - ctly->pair = ctlx; - - mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, - MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); - - return 0; -} - -/* - * mdp5_ctl_request() - CTL allocation - * - * Try to return booked CTL for @intf_num is 1 or 2, unbooked for other INTFs. - * If no CTL is available in preferred category, allocate from the other one. - * - * @return fail if no CTL is available. - */ -struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, - int intf_num) -{ - struct mdp5_ctl *ctl = NULL; - const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED; - u32 match = ((intf_num == 1) || (intf_num == 2)) ? 
CTL_STAT_BOOKED : 0; - unsigned long flags; - int c; - - spin_lock_irqsave(&ctl_mgr->pool_lock, flags); - - /* search the preferred */ - for (c = 0; c < ctl_mgr->nctl; c++) - if ((ctl_mgr->ctls[c].status & checkm) == match) - goto found; - - dev_warn(ctl_mgr->dev->dev, - "fall back to the other CTL category for INTF %d!\n", intf_num); - - match ^= CTL_STAT_BOOKED; - for (c = 0; c < ctl_mgr->nctl; c++) - if ((ctl_mgr->ctls[c].status & checkm) == match) - goto found; - - dev_err(ctl_mgr->dev->dev, "No more CTL available!"); - goto unlock; - -found: - ctl = &ctl_mgr->ctls[c]; - ctl->status |= CTL_STAT_BUSY; - ctl->pending_ctl_trigger = 0; - DBG("CTL %d allocated", ctl->id); - -unlock: - spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); - return ctl; -} - -void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr) -{ - unsigned long flags; - int c; - - for (c = 0; c < ctl_mgr->nctl; c++) { - struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; - - spin_lock_irqsave(&ctl->hw_lock, flags); - ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0); - spin_unlock_irqrestore(&ctl->hw_lock, flags); - } -} - -void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr) -{ - kfree(ctl_mgr); -} - -struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, - void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd) -{ - struct mdp5_ctl_manager *ctl_mgr; - const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd); - int rev = mdp5_cfg_get_hw_rev(cfg_hnd); - const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; - unsigned long flags; - int c, ret; - - ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL); - if (!ctl_mgr) { - dev_err(dev->dev, "failed to allocate CTL manager\n"); - ret = -ENOMEM; - goto fail; - } - - if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) { - dev_err(dev->dev, "Increase static pool size to at least %d\n", - ctl_cfg->count); - ret = -ENOSPC; - goto fail; - } - - /* initialize the CTL manager: */ - ctl_mgr->dev = dev; - ctl_mgr->nlm = hw_cfg->lm.count; - ctl_mgr->nctl = ctl_cfg->count; - ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask; - spin_lock_init(&ctl_mgr->pool_lock); - - /* initialize each CTL of the pool: */ - spin_lock_irqsave(&ctl_mgr->pool_lock, flags); - for (c = 0; c < ctl_mgr->nctl; c++) { - struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; - - if (WARN_ON(!ctl_cfg->base[c])) { - dev_err(dev->dev, "CTL_%d: base is null!\n", c); - ret = -EINVAL; - spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); - goto fail; - } - ctl->ctlm = ctl_mgr; - ctl->id = c; - ctl->reg_offset = ctl_cfg->base[c]; - ctl->status = 0; - spin_lock_init(&ctl->hw_lock); - } - - /* - * In Dual DSI case, CTL0 and CTL1 are always assigned to two DSI - * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when - * only write into CTL0's FLUSH register) to keep two DSI pipes in sync. - * Single FLUSH is supported from hw rev v3.0. - */ - if (rev >= 3) { - ctl_mgr->single_flush_supported = true; - /* Reserve CTL0/1 for INTF1/2 */ - ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED; - ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED; - } - spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); - DBG("Pool of %d CTLs created.", ctl_mgr->nctl); - - return ctl_mgr; - -fail: - if (ctl_mgr) - mdp5_ctlm_destroy(ctl_mgr); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h deleted file mode 100644 index b63120388dc6..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2014 The Linux Foundation. 
All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __MDP5_CTL_H__ -#define __MDP5_CTL_H__ - -#include "msm_drv.h" - -/* - * CTL Manager prototypes: - * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler, - * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions. - */ -struct mdp5_ctl_manager; -struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, - void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd); -void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm); -void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); - -/* - * CTL prototypes: - * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler, - * which is then used to call the other mdp5_ctl_*(ctl, ...) functions. - */ -struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num); - -int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl); - -struct mdp5_interface; -struct mdp5_pipeline; -int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p); -int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p, - bool enabled); - -int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, - int cursor_id, bool enable); -int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); - -#define MAX_PIPE_STAGE 2 - -/* - * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM) - * - * @stage: array to contain the pipe num for each stage - * @stage_cnt: valid stage number in stage array - * @ctl_blend_op_flags: blender operation mode flags - * - * Note: - * CTL registers need to be flushed after calling this function - * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) - */ -#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) -int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, - enum mdp5_pipe stage[][MAX_PIPE_STAGE], - enum mdp5_pipe r_stage[][MAX_PIPE_STAGE], - u32 stage_cnt, u32 ctl_blend_op_flags); - -/** - * mdp_ctl_flush_mask...() - Register FLUSH masks - * - * These masks are used to specify which block(s) need to be flushed - * through @flush_mask parameter in mdp5_ctl_commit(.., flush_mask). - */ -u32 mdp_ctl_flush_mask_lm(int lm); -u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe); -u32 mdp_ctl_flush_mask_cursor(int cursor_id); -u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); - -/* @flush_mask: see CTL flush masks definitions below */ -u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, - u32 flush_mask); -u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); - - - -#endif /* __MDP5_CTL_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c deleted file mode 100644 index 36ad3cbe5f79..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ /dev/null @@ -1,445 +0,0 @@ -/* - * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
- * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include -#include - -#include "mdp5_kms.h" - -static struct mdp5_kms *get_kms(struct drm_encoder *encoder) -{ - struct msm_drm_private *priv = encoder->dev->dev_private; - return to_mdp5_kms(to_mdp_kms(priv->kms)); -} - -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include -#include -#include -#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ - { \ - .src = MSM_BUS_MASTER_MDP_PORT0, \ - .dst = MSM_BUS_SLAVE_EBI_CH0, \ - .ab = (ab_val), \ - .ib = (ib_val), \ - } - -static struct msm_bus_vectors mdp_bus_vectors[] = { - MDP_BUS_VECTOR_ENTRY(0, 0), - MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), -}; -static struct msm_bus_paths mdp_bus_usecases[] = { { - .num_paths = 1, - .vectors = &mdp_bus_vectors[0], -}, { - .num_paths = 1, - .vectors = &mdp_bus_vectors[1], -} }; -static struct msm_bus_scale_pdata mdp_bus_scale_table = { - .usecase = mdp_bus_usecases, - .num_usecases = ARRAY_SIZE(mdp_bus_usecases), - .name = "mdss_mdp", -}; - -static void bs_init(struct mdp5_encoder *mdp5_encoder) -{ - mdp5_encoder->bsc = msm_bus_scale_register_client( - &mdp_bus_scale_table); - DBG("bus scale client: %08x", mdp5_encoder->bsc); -} - -static void bs_fini(struct mdp5_encoder *mdp5_encoder) -{ - if (mdp5_encoder->bsc) { - msm_bus_scale_unregister_client(mdp5_encoder->bsc); - mdp5_encoder->bsc = 0; - } -} - -static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) -{ - if (mdp5_encoder->bsc) { - DBG("set bus scaling: %d", idx); - /* HACK: scaling down, and then immediately back up - * seems to leave things broken (underflow).. 
so - * never disable: - */ - idx = 1; - msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx); - } -} -#else -static void bs_init(struct mdp5_encoder *mdp5_encoder) {} -static void bs_fini(struct mdp5_encoder *mdp5_encoder) {} -static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {} -#endif - -static void mdp5_encoder_destroy(struct drm_encoder *encoder) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - bs_fini(mdp5_encoder); - drm_encoder_cleanup(encoder); - kfree(mdp5_encoder); -} - -static const struct drm_encoder_funcs mdp5_encoder_funcs = { - .destroy = mdp5_encoder_destroy, -}; - -static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct drm_device *dev = encoder->dev; - struct drm_connector *connector; - int intf = mdp5_encoder->intf->num; - uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; - uint32_t display_v_start, display_v_end; - uint32_t hsync_start_x, hsync_end_x; - uint32_t format = 0x2100; - unsigned long flags; - - mode = adjusted_mode; - - DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - mode->base.id, mode->name, - mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, - mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, - mode->vsync_end, mode->vtotal, - mode->type, mode->flags); - - ctrl_pol = 0; - - /* DSI controller cannot handle active-low sync signals. */ - if (mdp5_encoder->intf->type != INTF_DSI) { - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW; - } - /* probably need to get DATA_EN polarity from panel.. */ - - dtv_hsync_skew = 0; /* get this from panel? 
*/ - - /* Get color format from panel, default is 8bpc */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - switch (connector->display_info.bpc) { - case 4: - format |= 0; - break; - case 5: - format |= 0x15; - break; - case 6: - format |= 0x2A; - break; - case 8: - default: - format |= 0x3F; - break; - } - break; - } - } - - hsync_start_x = (mode->htotal - mode->hsync_start); - hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; - - vsync_period = mode->vtotal * mode->htotal; - vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; - display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; - display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; - - /* - * For edp only: - * DISPLAY_V_START = (VBP * HCYCLE) + HBP - * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP - */ - if (mdp5_encoder->intf->type == INTF_eDP) { - display_v_start += mode->htotal - mode->hsync_start; - display_v_end -= mode->hsync_start - mode->hdisplay; - } - - spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); - - mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), - MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) | - MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal)); - mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period); - mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len); - mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf), - MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) | - MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x)); - mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start); - mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end); - mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0); - mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff); - mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew); - mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol); - mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf), - MDP5_INTF_ACTIVE_HCTL_START(0) | - MDP5_INTF_ACTIVE_HCTL_END(0)); - mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0); - mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0); - mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format); - mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? 
*/ - - spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - - mdp5_crtc_set_pipeline(encoder->crtc); -} - -static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct mdp5_ctl *ctl = mdp5_encoder->ctl; - struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); - struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); - struct mdp5_interface *intf = mdp5_encoder->intf; - int intfn = mdp5_encoder->intf->num; - unsigned long flags; - - if (WARN_ON(!mdp5_encoder->enabled)) - return; - - mdp5_ctl_set_encoder_state(ctl, pipeline, false); - - spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0); - spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. - */ - mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf)); - - bs_set(mdp5_encoder, 0); - - mdp5_encoder->enabled = false; -} - -static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct mdp5_ctl *ctl = mdp5_encoder->ctl; - struct mdp5_interface *intf = mdp5_encoder->intf; - struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); - int intfn = intf->num; - unsigned long flags; - - if (WARN_ON(mdp5_encoder->enabled)) - return; - - bs_set(mdp5_encoder, 1); - spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); - spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); - - mdp5_ctl_set_encoder_state(ctl, pipeline, true); - - mdp5_encoder->enabled = true; -} - -static void mdp5_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_interface *intf = mdp5_encoder->intf; - - if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) - mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode); - else - mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode); -} - -static void mdp5_encoder_disable(struct drm_encoder *encoder) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_interface *intf = mdp5_encoder->intf; - - if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) - mdp5_cmd_encoder_disable(encoder); - else - mdp5_vid_encoder_disable(encoder); -} - -static void mdp5_encoder_enable(struct drm_encoder *encoder) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_interface *intf = mdp5_encoder->intf; - /* this isn't right I think */ - struct drm_crtc_state *cstate = encoder->crtc->state; - - mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode); - - if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) - mdp5_cmd_encoder_enable(encoder); - else - mdp5_vid_encoder_enable(encoder); -} - -static int mdp5_encoder_atomic_check(struct drm_encoder *encoder, - struct 
drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state); - struct mdp5_interface *intf = mdp5_encoder->intf; - struct mdp5_ctl *ctl = mdp5_encoder->ctl; - - mdp5_cstate->ctl = ctl; - mdp5_cstate->pipeline.intf = intf; - - return 0; -} - -static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { - .disable = mdp5_encoder_disable, - .enable = mdp5_encoder_enable, - .atomic_check = mdp5_encoder_atomic_check, -}; - -int mdp5_encoder_get_linecount(struct drm_encoder *encoder) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_kms *mdp5_kms = get_kms(encoder); - int intf = mdp5_encoder->intf->num; - - return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf)); -} - -u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_kms *mdp5_kms = get_kms(encoder); - int intf = mdp5_encoder->intf->num; - - return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf)); -} - -int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); - struct mdp5_kms *mdp5_kms; - struct device *dev; - int intf_num; - u32 data = 0; - - if (!encoder || !slave_encoder) - return -EINVAL; - - mdp5_kms = get_kms(encoder); - intf_num = mdp5_encoder->intf->num; - - /* Switch slave encoder's TimingGen Sync mode, - * to use the master's enable signal for the slave encoder. - */ - if (intf_num == 1) - data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC; - else if (intf_num == 2) - data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC; - else - return -EINVAL; - - dev = &mdp5_kms->pdev->dev; - /* Make sure clocks are on when connectors calling this function. */ - pm_runtime_get_sync(dev); - - /* Dumb Panel, Sync mode */ - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0); - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data); - mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); - - mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true); - - pm_runtime_put_sync(dev); - - return 0; -} - -void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_interface *intf = mdp5_encoder->intf; - - /* TODO: Expand this to set writeback modes too */ - if (cmd_mode) { - WARN_ON(intf->type != INTF_DSI); - intf->mode = MDP5_INTF_DSI_MODE_COMMAND; - } else { - if (intf->type == INTF_DSI) - intf->mode = MDP5_INTF_DSI_MODE_VIDEO; - else - intf->mode = MDP5_INTF_MODE_NONE; - } -} - -/* initialize encoder */ -struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf, - struct mdp5_ctl *ctl) -{ - struct drm_encoder *encoder = NULL; - struct mdp5_encoder *mdp5_encoder; - int enc_type = (intf->type == INTF_DSI) ? 
- DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS; - int ret; - - mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL); - if (!mdp5_encoder) { - ret = -ENOMEM; - goto fail; - } - - encoder = &mdp5_encoder->base; - mdp5_encoder->ctl = ctl; - mdp5_encoder->intf = intf; - - spin_lock_init(&mdp5_encoder->intf_lock); - - drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL); - - drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); - - bs_init(mdp5_encoder); - - return encoder; - -fail: - if (encoder) - mdp5_encoder_destroy(encoder); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c deleted file mode 100644 index 280e368bc9bb..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include - -#include - -#include "msm_drv.h" -#include "mdp5_kms.h" - -void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, - uint32_t old_irqmask) -{ - mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR, - irqmask ^ (irqmask & old_irqmask)); - mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask); -} - -static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) -{ - struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler); - static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1); - extern bool dumpstate; - - DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus); - - if (dumpstate && __ratelimit(&rs)) { - struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev); - drm_state_dump(mdp5_kms->dev, &p); - if (mdp5_kms->smp) - mdp5_smp_dump(mdp5_kms->smp, &p); - } -} - -void mdp5_irq_preinstall(struct msm_kms *kms) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct device *dev = &mdp5_kms->pdev->dev; - - pm_runtime_get_sync(dev); - mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); - mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); - pm_runtime_put_sync(dev); -} - -int mdp5_irq_postinstall(struct msm_kms *kms) -{ - struct mdp_kms *mdp_kms = to_mdp_kms(kms); - struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); - struct device *dev = &mdp5_kms->pdev->dev; - struct mdp_irq *error_handler = &mdp5_kms->error_handler; - - error_handler->irq = mdp5_irq_error_handler; - error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN | - MDP5_IRQ_INTF1_UNDER_RUN | - MDP5_IRQ_INTF2_UNDER_RUN | - MDP5_IRQ_INTF3_UNDER_RUN; - - pm_runtime_get_sync(dev); - mdp_irq_register(mdp_kms, error_handler); - pm_runtime_put_sync(dev); - - return 0; -} - -void mdp5_irq_uninstall(struct msm_kms *kms) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct device *dev = &mdp5_kms->pdev->dev; - - pm_runtime_get_sync(dev); - mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); - pm_runtime_put_sync(dev); -} - -irqreturn_t mdp5_irq(struct msm_kms *kms) -{ - struct mdp_kms *mdp_kms = to_mdp_kms(kms); - struct mdp5_kms *mdp5_kms = 
to_mdp5_kms(mdp_kms); - struct drm_device *dev = mdp5_kms->dev; - struct msm_drm_private *priv = dev->dev_private; - unsigned int id; - uint32_t status, enable; - - enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN); - status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable; - mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status); - - VERB("status=%08x", status); - - mdp_dispatch_irqs(mdp_kms, status); - - for (id = 0; id < priv->num_crtcs; id++) - if (status & mdp5_crtc_vblank(priv->crtcs[id])) - drm_handle_vblank(dev, id); - - return IRQ_HANDLED; -} - -int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct device *dev = &mdp5_kms->pdev->dev; - - pm_runtime_get_sync(dev); - mdp_update_vblank_mask(to_mdp_kms(kms), - mdp5_crtc_vblank(crtc), true); - pm_runtime_put_sync(dev); - - return 0; -} - -void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct device *dev = &mdp5_kms->pdev->dev; - - pm_runtime_get_sync(dev); - mdp_update_vblank_mask(to_mdp_kms(kms), - mdp5_crtc_vblank(crtc), false); - pm_runtime_put_sync(dev); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c deleted file mode 100644 index 6d8e3a9a6fc0..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ /dev/null @@ -1,1067 +0,0 @@ -/* - * Copyright (c) 2014, The Linux Foundation. All rights reserved. - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include - -#include "msm_drv.h" -#include "msm_gem.h" -#include "msm_mmu.h" -#include "mdp5_kms.h" - -static const char *iommu_ports[] = { - "mdp_0", -}; - -static int mdp5_hw_init(struct msm_kms *kms) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct device *dev = &mdp5_kms->pdev->dev; - unsigned long flags; - - pm_runtime_get_sync(dev); - - /* Magic unknown register writes: - * - * W VBIF:0x004 00000001 (mdss_mdp.c:839) - * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839) - * W MDP5:0x2e4 0x55 (mdss_mdp.c:839) - * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839) - * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839) - * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839) - * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839) - * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839) - * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839) - * - * Downstream fbdev driver gets these register offsets/values - * from DT.. not really sure what these registers are or if - * different values for different boards/SoC's, etc. I guess - * they are the golden registers. - * - * Not setting these does not seem to cause any problem. But - * we may be getting lucky with the bootloader initializing - * them for us. OTOH, if we can always count on the bootloader - * setting the golden registers, then perhaps we don't need to - * care. 
- */ - - spin_lock_irqsave(&mdp5_kms->resource_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); - spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); - - mdp5_ctlm_hw_reset(mdp5_kms->ctlm); - - pm_runtime_put_sync(dev); - - return 0; -} - -struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s) -{ - struct msm_drm_private *priv = s->dev->dev_private; - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); - struct msm_kms_state *state = to_kms_state(s); - struct mdp5_state *new_state; - int ret; - - if (state->state) - return state->state; - - ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx); - if (ret) - return ERR_PTR(ret); - - new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL); - if (!new_state) - return ERR_PTR(-ENOMEM); - - /* Copy state: */ - new_state->hwpipe = mdp5_kms->state->hwpipe; - new_state->hwmixer = mdp5_kms->state->hwmixer; - if (mdp5_kms->smp) - new_state->smp = mdp5_kms->state->smp; - - state->state = new_state; - - return new_state; -} - -static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - swap(to_kms_state(state)->state, mdp5_kms->state); -} - -static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct device *dev = &mdp5_kms->pdev->dev; - - pm_runtime_get_sync(dev); - - if (mdp5_kms->smp) - mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp); -} - -static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct device *dev = &mdp5_kms->pdev->dev; - - if (mdp5_kms->smp) - mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); - - pm_runtime_put_sync(dev); -} - -static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms, - struct drm_crtc *crtc) -{ - mdp5_crtc_wait_for_commit_done(crtc); -} - -static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, - struct drm_encoder *encoder) -{ - return rate; -} - -static int mdp5_set_split_display(struct msm_kms *kms, - struct drm_encoder *encoder, - struct drm_encoder *slave_encoder, - bool is_cmd_mode) -{ - if (is_cmd_mode) - return mdp5_cmd_encoder_set_split_display(encoder, - slave_encoder); - else - return mdp5_vid_encoder_set_split_display(encoder, - slave_encoder); -} - -static void mdp5_set_encoder_mode(struct msm_kms *kms, - struct drm_encoder *encoder, - bool cmd_mode) -{ - mdp5_encoder_set_intf_mode(encoder, cmd_mode); -} - -static void mdp5_kms_destroy(struct msm_kms *kms) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct msm_gem_address_space *aspace = kms->aspace; - int i; - - for (i = 0; i < mdp5_kms->num_hwmixers; i++) - mdp5_mixer_destroy(mdp5_kms->hwmixers[i]); - - for (i = 0; i < mdp5_kms->num_hwpipes; i++) - mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); - - if (aspace) { - aspace->mmu->funcs->detach(aspace->mmu, - iommu_ports, ARRAY_SIZE(iommu_ports)); - msm_gem_address_space_put(aspace); - } -} - -#ifdef CONFIG_DEBUG_FS -static int smp_show(struct seq_file *m, void *arg) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct msm_drm_private *priv = dev->dev_private; - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); - struct drm_printer p = drm_seq_file_printer(m); - - if (!mdp5_kms->smp) { - drm_printf(&p, "no SMP pool\n"); - return 0; 
- } - - mdp5_smp_dump(mdp5_kms->smp, &p); - - return 0; -} - -static struct drm_info_list mdp5_debugfs_list[] = { - {"smp", smp_show }, -}; - -static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) -{ - struct drm_device *dev = minor->dev; - int ret; - - ret = drm_debugfs_create_files(mdp5_debugfs_list, - ARRAY_SIZE(mdp5_debugfs_list), - minor->debugfs_root, minor); - - if (ret) { - dev_err(dev->dev, "could not install mdp5_debugfs_list\n"); - return ret; - } - - return 0; -} -#endif - -static const struct mdp_kms_funcs kms_funcs = { - .base = { - .hw_init = mdp5_hw_init, - .irq_preinstall = mdp5_irq_preinstall, - .irq_postinstall = mdp5_irq_postinstall, - .irq_uninstall = mdp5_irq_uninstall, - .irq = mdp5_irq, - .enable_vblank = mdp5_enable_vblank, - .disable_vblank = mdp5_disable_vblank, - .swap_state = mdp5_swap_state, - .prepare_commit = mdp5_prepare_commit, - .complete_commit = mdp5_complete_commit, - .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done, - .get_format = mdp_get_format, - .round_pixclk = mdp5_round_pixclk, - .set_split_display = mdp5_set_split_display, - .set_encoder_mode = mdp5_set_encoder_mode, - .destroy = mdp5_kms_destroy, -#ifdef CONFIG_DEBUG_FS - .debugfs_init = mdp5_kms_debugfs_init, -#endif - }, - .set_irqmask = mdp5_set_irqmask, -}; - -int mdp5_disable(struct mdp5_kms *mdp5_kms) -{ - DBG(""); - - mdp5_kms->enable_count--; - WARN_ON(mdp5_kms->enable_count < 0); - - clk_disable_unprepare(mdp5_kms->ahb_clk); - clk_disable_unprepare(mdp5_kms->axi_clk); - clk_disable_unprepare(mdp5_kms->core_clk); - if (mdp5_kms->lut_clk) - clk_disable_unprepare(mdp5_kms->lut_clk); - - return 0; -} - -int mdp5_enable(struct mdp5_kms *mdp5_kms) -{ - DBG(""); - - mdp5_kms->enable_count++; - - clk_prepare_enable(mdp5_kms->ahb_clk); - clk_prepare_enable(mdp5_kms->axi_clk); - clk_prepare_enable(mdp5_kms->core_clk); - if (mdp5_kms->lut_clk) - clk_prepare_enable(mdp5_kms->lut_clk); - - return 0; -} - -static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, - struct mdp5_interface *intf, - struct mdp5_ctl *ctl) -{ - struct drm_device *dev = mdp5_kms->dev; - struct msm_drm_private *priv = dev->dev_private; - struct drm_encoder *encoder; - - encoder = mdp5_encoder_init(dev, intf, ctl); - if (IS_ERR(encoder)) { - dev_err(dev->dev, "failed to construct encoder\n"); - return encoder; - } - - priv->encoders[priv->num_encoders++] = encoder; - - return encoder; -} - -static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num) -{ - const enum mdp5_intf_type *intfs = hw_cfg->intf.connect; - const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect); - int id = 0, i; - - for (i = 0; i < intf_cnt; i++) { - if (intfs[i] == INTF_DSI) { - if (intf_num == i) - return id; - - id++; - } - } - - return -EINVAL; -} - -static int modeset_init_intf(struct mdp5_kms *mdp5_kms, - struct mdp5_interface *intf) -{ - struct drm_device *dev = mdp5_kms->dev; - struct msm_drm_private *priv = dev->dev_private; - struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm; - struct mdp5_ctl *ctl; - struct drm_encoder *encoder; - int ret = 0; - - switch (intf->type) { - case INTF_eDP: - if (!priv->edp) - break; - - ctl = mdp5_ctlm_request(ctlm, intf->num); - if (!ctl) { - ret = -EINVAL; - break; - } - - encoder = construct_encoder(mdp5_kms, intf, ctl); - if (IS_ERR(encoder)) { - ret = PTR_ERR(encoder); - break; - } - - ret = msm_edp_modeset_init(priv->edp, dev, encoder); - break; - case INTF_HDMI: - if (!priv->hdmi) - break; - - ctl = mdp5_ctlm_request(ctlm, intf->num); - if 
(!ctl) { - ret = -EINVAL; - break; - } - - encoder = construct_encoder(mdp5_kms, intf, ctl); - if (IS_ERR(encoder)) { - ret = PTR_ERR(encoder); - break; - } - - ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); - break; - case INTF_DSI: - { - const struct mdp5_cfg_hw *hw_cfg = - mdp5_cfg_get_hw_config(mdp5_kms->cfg); - int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num); - - if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { - dev_err(dev->dev, "failed to find dsi from intf %d\n", - intf->num); - ret = -EINVAL; - break; - } - - if (!priv->dsi[dsi_id]) - break; - - ctl = mdp5_ctlm_request(ctlm, intf->num); - if (!ctl) { - ret = -EINVAL; - break; - } - - encoder = construct_encoder(mdp5_kms, intf, ctl); - if (IS_ERR(encoder)) { - ret = PTR_ERR(encoder); - break; - } - - ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); - break; - } - default: - dev_err(dev->dev, "unknown intf: %d\n", intf->type); - ret = -EINVAL; - break; - } - - return ret; -} - -static int modeset_init(struct mdp5_kms *mdp5_kms) -{ - struct drm_device *dev = mdp5_kms->dev; - struct msm_drm_private *priv = dev->dev_private; - const struct mdp5_cfg_hw *hw_cfg; - unsigned int num_crtcs; - int i, ret, pi = 0, ci = 0; - struct drm_plane *primary[MAX_BASES] = { NULL }; - struct drm_plane *cursor[MAX_BASES] = { NULL }; - - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - - /* - * Construct encoders and modeset initialize connector devices - * for each external display interface. - */ - for (i = 0; i < mdp5_kms->num_intfs; i++) { - ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]); - if (ret) - goto fail; - } - - /* - * We should ideally have less number of encoders (set up by parsing - * the MDP5 interfaces) than the number of layer mixers present in HW, - * but let's be safe here anyway - */ - num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers); - - /* - * Construct planes equaling the number of hw pipes, and CRTCs for the - * N encoders set up by the driver. 
The first N planes become primary - * planes for the CRTCs, with the remainder as overlay planes: - */ - for (i = 0; i < mdp5_kms->num_hwpipes; i++) { - struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; - struct drm_plane *plane; - enum drm_plane_type type; - - if (i < num_crtcs) - type = DRM_PLANE_TYPE_PRIMARY; - else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR) - type = DRM_PLANE_TYPE_CURSOR; - else - type = DRM_PLANE_TYPE_OVERLAY; - - plane = mdp5_plane_init(dev, type); - if (IS_ERR(plane)) { - ret = PTR_ERR(plane); - dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret); - goto fail; - } - priv->planes[priv->num_planes++] = plane; - - if (type == DRM_PLANE_TYPE_PRIMARY) - primary[pi++] = plane; - if (type == DRM_PLANE_TYPE_CURSOR) - cursor[ci++] = plane; - } - - for (i = 0; i < num_crtcs; i++) { - struct drm_crtc *crtc; - - crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i); - if (IS_ERR(crtc)) { - ret = PTR_ERR(crtc); - dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); - goto fail; - } - priv->crtcs[priv->num_crtcs++] = crtc; - } - - /* - * Now that we know the number of crtcs we've created, set the possible - * crtcs for the encoders - */ - for (i = 0; i < priv->num_encoders; i++) { - struct drm_encoder *encoder = priv->encoders[i]; - - encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; - } - - return 0; - -fail: - return ret; -} - -static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms, - u32 *major, u32 *minor) -{ - struct device *dev = &mdp5_kms->pdev->dev; - u32 version; - - pm_runtime_get_sync(dev); - version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION); - pm_runtime_put_sync(dev); - - *major = FIELD(version, MDP5_HW_VERSION_MAJOR); - *minor = FIELD(version, MDP5_HW_VERSION_MINOR); - - dev_info(dev, "MDP5 version v%d.%d", *major, *minor); -} - -static int get_clk(struct platform_device *pdev, struct clk **clkp, - const char *name, bool mandatory) -{ - struct device *dev = &pdev->dev; - struct clk *clk = msm_clk_get(pdev, name); - if (IS_ERR(clk) && mandatory) { - dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); - return PTR_ERR(clk); - } - if (IS_ERR(clk)) - DBG("skipping %s", name); - else - *clkp = clk; - - return 0; -} - -static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_encoder *encoder; - - drm_for_each_encoder(encoder, dev) - if (encoder->crtc == crtc) - return encoder; - - return NULL; -} - -static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) -{ - struct msm_drm_private *priv = dev->dev_private; - struct drm_crtc *crtc; - struct drm_encoder *encoder; - int line, vsw, vbp, vactive_start, vactive_end, vfp_end; - - crtc = priv->crtcs[pipe]; - if (!crtc) { - DRM_ERROR("Invalid crtc %d\n", pipe); - return false; - } - - encoder = get_encoder_from_crtc(crtc); - if (!encoder) { - DRM_ERROR("no encoder found for crtc %d\n", pipe); - return false; - } - - vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; - vbp = mode->crtc_vtotal - mode->crtc_vsync_end; - - /* - * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at - * the end of VFP. Translate the porch values relative to the line - * counter positions. 
- */ - - vactive_start = vsw + vbp + 1; - - vactive_end = vactive_start + mode->crtc_vdisplay; - - /* last scan line before VSYNC */ - vfp_end = mode->crtc_vtotal; - - if (stime) - *stime = ktime_get(); - - line = mdp5_encoder_get_linecount(encoder); - - if (line < vactive_start) { - line -= vactive_start; - } else if (line > vactive_end) { - line = line - vfp_end - vactive_start; - } else { - line -= vactive_start; - } - - *vpos = line; - *hpos = 0; - - if (etime) - *etime = ktime_get(); - - return true; -} - -static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe) -{ - struct msm_drm_private *priv = dev->dev_private; - struct drm_crtc *crtc; - struct drm_encoder *encoder; - - if (pipe >= priv->num_crtcs) - return 0; - - crtc = priv->crtcs[pipe]; - if (!crtc) - return 0; - - encoder = get_encoder_from_crtc(crtc); - if (!encoder) - return 0; - - return mdp5_encoder_get_framecount(encoder); -} - -struct msm_kms *mdp5_kms_init(struct drm_device *dev) -{ - struct msm_drm_private *priv = dev->dev_private; - struct platform_device *pdev; - struct mdp5_kms *mdp5_kms; - struct mdp5_cfg *config; - struct msm_kms *kms; - struct msm_gem_address_space *aspace; - int irq, i, ret; - - /* priv->kms would have been populated by the MDP5 driver */ - kms = priv->kms; - if (!kms) - return NULL; - - mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - - mdp_kms_init(&mdp5_kms->base, &kms_funcs); - - pdev = mdp5_kms->pdev; - - irq = irq_of_parse_and_map(pdev->dev.of_node, 0); - if (irq < 0) { - ret = irq; - dev_err(&pdev->dev, "failed to get irq: %d\n", ret); - goto fail; - } - - kms->irq = irq; - - config = mdp5_cfg_get_config(mdp5_kms->cfg); - - /* make sure things are off before attaching iommu (bootloader could - * have left things on, in which case we'll start getting faults if - * we don't disable): - */ - pm_runtime_get_sync(&pdev->dev); - for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { - if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) || - !config->hw->intf.base[i]) - continue; - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); - - mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3); - } - mdelay(16); - - if (config->platform.iommu) { - aspace = msm_gem_address_space_create(&pdev->dev, - config->platform.iommu, "mdp5"); - if (IS_ERR(aspace)) { - ret = PTR_ERR(aspace); - goto fail; - } - - kms->aspace = aspace; - - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); - if (ret) { - dev_err(&pdev->dev, "failed to attach iommu: %d\n", - ret); - goto fail; - } - } else { - dev_info(&pdev->dev, - "no iommu, fallback to phys contig buffers for scanout\n"); - aspace = NULL; - } - - pm_runtime_put_sync(&pdev->dev); - - ret = modeset_init(mdp5_kms); - if (ret) { - dev_err(&pdev->dev, "modeset_init failed: %d\n", ret); - goto fail; - } - - dev->mode_config.min_width = 0; - dev->mode_config.min_height = 0; - dev->mode_config.max_width = 0xffff; - dev->mode_config.max_height = 0xffff; - - dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; - dev->driver->get_scanout_position = mdp5_get_scanoutpos; - dev->driver->get_vblank_counter = mdp5_get_vblank_counter; - dev->max_vblank_count = 0xffffffff; - dev->vblank_disable_immediate = true; - - return kms; -fail: - if (kms) - mdp5_kms_destroy(kms); - return ERR_PTR(ret); -} - -static void mdp5_destroy(struct platform_device *pdev) -{ - struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); - int i; - - if (mdp5_kms->ctlm) - mdp5_ctlm_destroy(mdp5_kms->ctlm); - if 
(mdp5_kms->smp) - mdp5_smp_destroy(mdp5_kms->smp); - if (mdp5_kms->cfg) - mdp5_cfg_destroy(mdp5_kms->cfg); - - for (i = 0; i < mdp5_kms->num_intfs; i++) - kfree(mdp5_kms->intfs[i]); - - if (mdp5_kms->rpm_enabled) - pm_runtime_disable(&pdev->dev); - - kfree(mdp5_kms->state); -} - -static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt, - const enum mdp5_pipe *pipes, const uint32_t *offsets, - uint32_t caps) -{ - struct drm_device *dev = mdp5_kms->dev; - int i, ret; - - for (i = 0; i < cnt; i++) { - struct mdp5_hw_pipe *hwpipe; - - hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps); - if (IS_ERR(hwpipe)) { - ret = PTR_ERR(hwpipe); - dev_err(dev->dev, "failed to construct pipe for %s (%d)\n", - pipe2name(pipes[i]), ret); - return ret; - } - hwpipe->idx = mdp5_kms->num_hwpipes; - mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe; - } - - return 0; -} - -static int hwpipe_init(struct mdp5_kms *mdp5_kms) -{ - static const enum mdp5_pipe rgb_planes[] = { - SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, - }; - static const enum mdp5_pipe vig_planes[] = { - SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, - }; - static const enum mdp5_pipe dma_planes[] = { - SSPP_DMA0, SSPP_DMA1, - }; - static const enum mdp5_pipe cursor_planes[] = { - SSPP_CURSOR0, SSPP_CURSOR1, - }; - const struct mdp5_cfg_hw *hw_cfg; - int ret; - - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - - /* Construct RGB pipes: */ - ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes, - hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps); - if (ret) - return ret; - - /* Construct video (VIG) pipes: */ - ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes, - hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps); - if (ret) - return ret; - - /* Construct DMA pipes: */ - ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes, - hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps); - if (ret) - return ret; - - /* Construct cursor pipes: */ - ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count, - cursor_planes, hw_cfg->pipe_cursor.base, - hw_cfg->pipe_cursor.caps); - if (ret) - return ret; - - return 0; -} - -static int hwmixer_init(struct mdp5_kms *mdp5_kms) -{ - struct drm_device *dev = mdp5_kms->dev; - const struct mdp5_cfg_hw *hw_cfg; - int i, ret; - - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - - for (i = 0; i < hw_cfg->lm.count; i++) { - struct mdp5_hw_mixer *mixer; - - mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]); - if (IS_ERR(mixer)) { - ret = PTR_ERR(mixer); - dev_err(dev->dev, "failed to construct LM%d (%d)\n", - i, ret); - return ret; - } - - mixer->idx = mdp5_kms->num_hwmixers; - mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer; - } - - return 0; -} - -static int interface_init(struct mdp5_kms *mdp5_kms) -{ - struct drm_device *dev = mdp5_kms->dev; - const struct mdp5_cfg_hw *hw_cfg; - const enum mdp5_intf_type *intf_types; - int i; - - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - intf_types = hw_cfg->intf.connect; - - for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { - struct mdp5_interface *intf; - - if (intf_types[i] == INTF_DISABLED) - continue; - - intf = kzalloc(sizeof(*intf), GFP_KERNEL); - if (!intf) { - dev_err(dev->dev, "failed to construct INTF%d\n", i); - return -ENOMEM; - } - - intf->num = i; - intf->type = intf_types[i]; - intf->mode = MDP5_INTF_MODE_NONE; - intf->idx = mdp5_kms->num_intfs; - mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf; - } - - return 0; -} - -static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) -{ - struct 
msm_drm_private *priv = dev->dev_private; - struct mdp5_kms *mdp5_kms; - struct mdp5_cfg *config; - u32 major, minor; - int ret; - - mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL); - if (!mdp5_kms) { - ret = -ENOMEM; - goto fail; - } - - platform_set_drvdata(pdev, mdp5_kms); - - spin_lock_init(&mdp5_kms->resource_lock); - - mdp5_kms->dev = dev; - mdp5_kms->pdev = pdev; - - drm_modeset_lock_init(&mdp5_kms->state_lock); - mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL); - if (!mdp5_kms->state) { - ret = -ENOMEM; - goto fail; - } - - mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); - if (IS_ERR(mdp5_kms->mmio)) { - ret = PTR_ERR(mdp5_kms->mmio); - goto fail; - } - - /* mandatory clocks: */ - ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true); - if (ret) - goto fail; - ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true); - if (ret) - goto fail; - ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true); - if (ret) - goto fail; - ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true); - if (ret) - goto fail; - - /* optional clocks: */ - get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); - - /* we need to set a default rate before enabling. Set a safe - * rate first, then figure out hw revision, and then set a - * more optimal rate: - */ - clk_set_rate(mdp5_kms->core_clk, 200000000); - - pm_runtime_enable(&pdev->dev); - mdp5_kms->rpm_enabled = true; - - read_mdp_hw_revision(mdp5_kms, &major, &minor); - - mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor); - if (IS_ERR(mdp5_kms->cfg)) { - ret = PTR_ERR(mdp5_kms->cfg); - mdp5_kms->cfg = NULL; - goto fail; - } - - config = mdp5_cfg_get_config(mdp5_kms->cfg); - mdp5_kms->caps = config->hw->mdp.caps; - - /* TODO: compute core clock rate at runtime */ - clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk); - - /* - * Some chipsets have a Shared Memory Pool (SMP), while others - * have dedicated latency buffering per source pipe instead; - * this section initializes the SMP: - */ - if (mdp5_kms->caps & MDP_CAP_SMP) { - mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp); - if (IS_ERR(mdp5_kms->smp)) { - ret = PTR_ERR(mdp5_kms->smp); - mdp5_kms->smp = NULL; - goto fail; - } - } - - mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg); - if (IS_ERR(mdp5_kms->ctlm)) { - ret = PTR_ERR(mdp5_kms->ctlm); - mdp5_kms->ctlm = NULL; - goto fail; - } - - ret = hwpipe_init(mdp5_kms); - if (ret) - goto fail; - - ret = hwmixer_init(mdp5_kms); - if (ret) - goto fail; - - ret = interface_init(mdp5_kms); - if (ret) - goto fail; - - /* set uninit-ed kms */ - priv->kms = &mdp5_kms->base.base; - - return 0; -fail: - mdp5_destroy(pdev); - return ret; -} - -static int mdp5_bind(struct device *dev, struct device *master, void *data) -{ - struct drm_device *ddev = dev_get_drvdata(master); - struct platform_device *pdev = to_platform_device(dev); - - DBG(""); - - return mdp5_init(pdev, ddev); -} - -static void mdp5_unbind(struct device *dev, struct device *master, - void *data) -{ - struct platform_device *pdev = to_platform_device(dev); - - mdp5_destroy(pdev); -} - -static const struct component_ops mdp5_ops = { - .bind = mdp5_bind, - .unbind = mdp5_unbind, -}; - -static int mdp5_dev_probe(struct platform_device *pdev) -{ - DBG(""); - return component_add(&pdev->dev, &mdp5_ops); -} - -static int mdp5_dev_remove(struct platform_device *pdev) -{ - DBG(""); - component_del(&pdev->dev, &mdp5_ops); - return 0; -} - -static __maybe_unused int mdp5_runtime_suspend(struct device *dev) -{ - struct platform_device 
*pdev = to_platform_device(dev); - struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); - - DBG(""); - - return mdp5_disable(mdp5_kms); -} - -static __maybe_unused int mdp5_runtime_resume(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); - - DBG(""); - - return mdp5_enable(mdp5_kms); -} - -static const struct dev_pm_ops mdp5_pm_ops = { - SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL) -}; - -static const struct of_device_id mdp5_dt_match[] = { - { .compatible = "qcom,mdp5", }, - /* to support downstream DT files */ - { .compatible = "qcom,mdss_mdp", }, - {} -}; -MODULE_DEVICE_TABLE(of, mdp5_dt_match); - -static struct platform_driver mdp5_driver = { - .probe = mdp5_dev_probe, - .remove = mdp5_dev_remove, - .driver = { - .name = "msm_mdp", - .of_match_table = mdp5_dt_match, - .pm = &mdp5_pm_ops, - }, -}; - -void __init msm_mdp_register(void) -{ - DBG(""); - platform_driver_register(&mdp5_driver); -} - -void __exit msm_mdp_unregister(void) -{ - DBG(""); - platform_driver_unregister(&mdp5_driver); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h deleted file mode 100644 index 9b3fe01089d1..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#ifndef __MDP5_KMS_H__ -#define __MDP5_KMS_H__ - -#include "msm_drv.h" -#include "msm_kms.h" -#include "mdp/mdp_kms.h" -#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */ -#include "mdp5.xml.h" -#include "mdp5_pipe.h" -#include "mdp5_mixer.h" -#include "mdp5_ctl.h" -#include "mdp5_smp.h" - -struct mdp5_state; - -struct mdp5_kms { - struct mdp_kms base; - - struct drm_device *dev; - - struct platform_device *pdev; - - unsigned num_hwpipes; - struct mdp5_hw_pipe *hwpipes[SSPP_MAX]; - - unsigned num_hwmixers; - struct mdp5_hw_mixer *hwmixers[8]; - - unsigned num_intfs; - struct mdp5_interface *intfs[5]; - - struct mdp5_cfg_handler *cfg; - uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ - - /** - * Global atomic state. Do not access directly, use mdp5_get_state() - */ - struct mdp5_state *state; - struct drm_modeset_lock state_lock; - - struct mdp5_smp *smp; - struct mdp5_ctl_manager *ctlm; - - /* io/register spaces: */ - void __iomem *mmio; - - struct clk *axi_clk; - struct clk *ahb_clk; - struct clk *core_clk; - struct clk *lut_clk; - struct clk *vsync_clk; - - /* - * lock to protect access to global resources: ie., following register: - * - REG_MDP5_DISP_INTF_SEL - */ - spinlock_t resource_lock; - - bool rpm_enabled; - - struct mdp_irq error_handler; - - int enable_count; -}; -#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) - -/* Global atomic state for tracking resources that are shared across - * multiple kms objects (planes/crtcs/etc). 
- * - * For atomic updates which require modifying global state, - */ -struct mdp5_state { - struct mdp5_hw_pipe_state hwpipe; - struct mdp5_hw_mixer_state hwmixer; - struct mdp5_smp_state smp; -}; - -struct mdp5_state *__must_check -mdp5_get_state(struct drm_atomic_state *s); - -/* Atomic plane state. Subclasses the base drm_plane_state in order to - * track assigned hwpipe and hw specific state. - */ -struct mdp5_plane_state { - struct drm_plane_state base; - - struct mdp5_hw_pipe *hwpipe; - struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */ - - /* aligned with property */ - uint8_t premultiplied; - uint8_t zpos; - uint8_t alpha; - - /* assigned by crtc blender */ - enum mdp_mixer_stage_id stage; -}; -#define to_mdp5_plane_state(x) \ - container_of(x, struct mdp5_plane_state, base) - -struct mdp5_pipeline { - struct mdp5_interface *intf; - struct mdp5_hw_mixer *mixer; - struct mdp5_hw_mixer *r_mixer; /* right mixer */ -}; - -struct mdp5_crtc_state { - struct drm_crtc_state base; - - struct mdp5_ctl *ctl; - struct mdp5_pipeline pipeline; - - /* these are derivatives of intf/mixer state in mdp5_pipeline */ - u32 vblank_irqmask; - u32 err_irqmask; - u32 pp_done_irqmask; - - bool cmd_mode; -}; -#define to_mdp5_crtc_state(x) \ - container_of(x, struct mdp5_crtc_state, base) - -enum mdp5_intf_mode { - MDP5_INTF_MODE_NONE = 0, - - /* Modes used for DSI interface (INTF_DSI type): */ - MDP5_INTF_DSI_MODE_VIDEO, - MDP5_INTF_DSI_MODE_COMMAND, - - /* Modes used for WB interface (INTF_WB type): */ - MDP5_INTF_WB_MODE_BLOCK, - MDP5_INTF_WB_MODE_LINE, -}; - -struct mdp5_interface { - int idx; - int num; /* display interface number */ - enum mdp5_intf_type type; - enum mdp5_intf_mode mode; -}; - -struct mdp5_encoder { - struct drm_encoder base; - spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ - bool enabled; - uint32_t bsc; - - struct mdp5_interface *intf; - struct mdp5_ctl *ctl; -}; -#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) - -static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) -{ - WARN_ON(mdp5_kms->enable_count <= 0); - msm_writel(data, mdp5_kms->mmio + reg); -} - -static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg) -{ - WARN_ON(mdp5_kms->enable_count <= 0); - return msm_readl(mdp5_kms->mmio + reg); -} - -static inline const char *stage2name(enum mdp_mixer_stage_id stage) -{ - static const char *names[] = { -#define NAME(n) [n] = #n - NAME(STAGE_UNUSED), NAME(STAGE_BASE), - NAME(STAGE0), NAME(STAGE1), NAME(STAGE2), - NAME(STAGE3), NAME(STAGE4), NAME(STAGE6), -#undef NAME - }; - return names[stage]; -} - -static inline const char *pipe2name(enum mdp5_pipe pipe) -{ - static const char *names[] = { -#define NAME(n) [SSPP_ ## n] = #n - NAME(VIG0), NAME(VIG1), NAME(VIG2), - NAME(RGB0), NAME(RGB1), NAME(RGB2), - NAME(DMA0), NAME(DMA1), - NAME(VIG3), NAME(RGB3), - NAME(CURSOR0), NAME(CURSOR1), -#undef NAME - }; - return names[pipe]; -} - -static inline int pipe2nclients(enum mdp5_pipe pipe) -{ - switch (pipe) { - case SSPP_RGB0: - case SSPP_RGB1: - case SSPP_RGB2: - case SSPP_RGB3: - return 1; - default: - return 3; - } -} - -static inline uint32_t intf2err(int intf_num) -{ - switch (intf_num) { - case 0: return MDP5_IRQ_INTF0_UNDER_RUN; - case 1: return MDP5_IRQ_INTF1_UNDER_RUN; - case 2: return MDP5_IRQ_INTF2_UNDER_RUN; - case 3: return MDP5_IRQ_INTF3_UNDER_RUN; - default: return 0; - } -} - -static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer, - struct mdp5_interface *intf) -{ - /* - * In case of DSI Command Mode, 
the Ping Pong's read pointer IRQ - * acts as a Vblank signal. The Ping Pong buffer used is bound to - * layer mixer. - */ - - if ((intf->type == INTF_DSI) && - (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) - return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp; - - if (intf->type == INTF_WB) - return MDP5_IRQ_WB_2_DONE; - - switch (intf->num) { - case 0: return MDP5_IRQ_INTF0_VSYNC; - case 1: return MDP5_IRQ_INTF1_VSYNC; - case 2: return MDP5_IRQ_INTF2_VSYNC; - case 3: return MDP5_IRQ_INTF3_VSYNC; - default: return 0; - } -} - -static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer) -{ - return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp; -} - -void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, - uint32_t old_irqmask); -void mdp5_irq_preinstall(struct msm_kms *kms); -int mdp5_irq_postinstall(struct msm_kms *kms); -void mdp5_irq_uninstall(struct msm_kms *kms); -irqreturn_t mdp5_irq(struct msm_kms *kms); -int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); -void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); -int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); -void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); - -uint32_t mdp5_plane_get_flush(struct drm_plane *plane); -enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); -enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane); -struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum drm_plane_type type); - -struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); -uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); - -struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc); -struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc); -void mdp5_crtc_set_pipeline(struct drm_crtc *crtc); -void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); -struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, - struct drm_plane *plane, - struct drm_plane *cursor_plane, int id); - -struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf, struct mdp5_ctl *ctl); -int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder); -void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode); -int mdp5_encoder_get_linecount(struct drm_encoder *encoder); -u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder); - -#ifdef CONFIG_DRM_MSM_DSI -void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); -void mdp5_cmd_encoder_disable(struct drm_encoder *encoder); -void mdp5_cmd_encoder_enable(struct drm_encoder *encoder); -int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder); -#else -static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ -} -static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) -{ -} -static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) -{ -} -static inline int mdp5_cmd_encoder_set_split_display( - struct drm_encoder *encoder, struct drm_encoder *slave_encoder) -{ - return -EINVAL; -} -#endif - -#endif /* __MDP5_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c deleted file mode 100644 index f2a0db7a8a03..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright (c) 2016, The Linux Foundation. 
All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include -#include - -#include "msm_drv.h" -#include "mdp5_kms.h" - -/* - * If needed, this can become more specific: something like struct mdp5_mdss, - * which contains a 'struct msm_mdss base' member. - */ -struct msm_mdss { - struct drm_device *dev; - - void __iomem *mmio, *vbif; - - struct regulator *vdd; - - struct clk *ahb_clk; - struct clk *axi_clk; - struct clk *vsync_clk; - - struct { - volatile unsigned long enabled_mask; - struct irq_domain *domain; - } irqcontroller; -}; - -static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data) -{ - msm_writel(data, mdss->mmio + reg); -} - -static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg) -{ - return msm_readl(mdss->mmio + reg); -} - -static irqreturn_t mdss_irq(int irq, void *arg) -{ - struct msm_mdss *mdss = arg; - u32 intr; - - intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS); - - VERB("intr=%08x", intr); - - while (intr) { - irq_hw_number_t hwirq = fls(intr) - 1; - - generic_handle_irq(irq_find_mapping( - mdss->irqcontroller.domain, hwirq)); - intr &= ~(1 << hwirq); - } - - return IRQ_HANDLED; -} - -/* - * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc) - * can register to get their irq's delivered - */ - -#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \ - MDSS_HW_INTR_STATUS_INTR_DSI0 | \ - MDSS_HW_INTR_STATUS_INTR_DSI1 | \ - MDSS_HW_INTR_STATUS_INTR_HDMI | \ - MDSS_HW_INTR_STATUS_INTR_EDP) - -static void mdss_hw_mask_irq(struct irq_data *irqd) -{ - struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd); - - smp_mb__before_atomic(); - clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask); - smp_mb__after_atomic(); -} - -static void mdss_hw_unmask_irq(struct irq_data *irqd) -{ - struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd); - - smp_mb__before_atomic(); - set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask); - smp_mb__after_atomic(); -} - -static struct irq_chip mdss_hw_irq_chip = { - .name = "mdss", - .irq_mask = mdss_hw_mask_irq, - .irq_unmask = mdss_hw_unmask_irq, -}; - -static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) -{ - struct msm_mdss *mdss = d->host_data; - - if (!(VALID_IRQS & (1 << hwirq))) - return -EPERM; - - irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq); - irq_set_chip_data(irq, mdss); - - return 0; -} - -static const struct irq_domain_ops mdss_hw_irqdomain_ops = { - .map = mdss_hw_irqdomain_map, - .xlate = irq_domain_xlate_onecell, -}; - - -static int mdss_irq_domain_init(struct msm_mdss *mdss) -{ - struct device *dev = mdss->dev->dev; - struct irq_domain *d; - - d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops, - mdss); - if (!d) { - dev_err(dev, "mdss irq domain add failed\n"); - return -ENXIO; - } - - mdss->irqcontroller.enabled_mask = 0; - mdss->irqcontroller.domain = d; - - return 0; -} - -int msm_mdss_enable(struct msm_mdss *mdss) -{ - DBG(""); - - 
clk_prepare_enable(mdss->ahb_clk); - if (mdss->axi_clk) - clk_prepare_enable(mdss->axi_clk); - if (mdss->vsync_clk) - clk_prepare_enable(mdss->vsync_clk); - - return 0; -} - -int msm_mdss_disable(struct msm_mdss *mdss) -{ - DBG(""); - - if (mdss->vsync_clk) - clk_disable_unprepare(mdss->vsync_clk); - if (mdss->axi_clk) - clk_disable_unprepare(mdss->axi_clk); - clk_disable_unprepare(mdss->ahb_clk); - - return 0; -} - -static int msm_mdss_get_clocks(struct msm_mdss *mdss) -{ - struct platform_device *pdev = to_platform_device(mdss->dev->dev); - - mdss->ahb_clk = msm_clk_get(pdev, "iface"); - if (IS_ERR(mdss->ahb_clk)) - mdss->ahb_clk = NULL; - - mdss->axi_clk = msm_clk_get(pdev, "bus"); - if (IS_ERR(mdss->axi_clk)) - mdss->axi_clk = NULL; - - mdss->vsync_clk = msm_clk_get(pdev, "vsync"); - if (IS_ERR(mdss->vsync_clk)) - mdss->vsync_clk = NULL; - - return 0; -} - -void msm_mdss_destroy(struct drm_device *dev) -{ - struct msm_drm_private *priv = dev->dev_private; - struct msm_mdss *mdss = priv->mdss; - - if (!mdss) - return; - - irq_domain_remove(mdss->irqcontroller.domain); - mdss->irqcontroller.domain = NULL; - - regulator_disable(mdss->vdd); - - pm_runtime_disable(dev->dev); -} - -int msm_mdss_init(struct drm_device *dev) -{ - struct platform_device *pdev = to_platform_device(dev->dev); - struct msm_drm_private *priv = dev->dev_private; - struct msm_mdss *mdss; - int ret; - - DBG(""); - - if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss")) - return 0; - - mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL); - if (!mdss) { - ret = -ENOMEM; - goto fail; - } - - mdss->dev = dev; - - mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS"); - if (IS_ERR(mdss->mmio)) { - ret = PTR_ERR(mdss->mmio); - goto fail; - } - - mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); - if (IS_ERR(mdss->vbif)) { - ret = PTR_ERR(mdss->vbif); - goto fail; - } - - ret = msm_mdss_get_clocks(mdss); - if (ret) { - dev_err(dev->dev, "failed to get clocks: %d\n", ret); - goto fail; - } - - /* Regulator to enable GDSCs in downstream kernels */ - mdss->vdd = devm_regulator_get(dev->dev, "vdd"); - if (IS_ERR(mdss->vdd)) { - ret = PTR_ERR(mdss->vdd); - goto fail; - } - - ret = regulator_enable(mdss->vdd); - if (ret) { - dev_err(dev->dev, "failed to enable regulator vdd: %d\n", - ret); - goto fail; - } - - ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0), - mdss_irq, 0, "mdss_isr", mdss); - if (ret) { - dev_err(dev->dev, "failed to init irq: %d\n", ret); - goto fail_irq; - } - - ret = mdss_irq_domain_init(mdss); - if (ret) { - dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret); - goto fail_irq; - } - - priv->mdss = mdss; - - pm_runtime_enable(dev->dev); - - return 0; -fail_irq: - regulator_disable(mdss->vdd); -fail: - return ret; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c deleted file mode 100644 index 8a00991f03c7..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (C) 2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include "mdp5_kms.h" - -/* - * As of now, there are only 2 combinations possible for source split: - * - * Left | Right - * -----|------ - * LM0 | LM1 - * LM2 | LM5 - * - */ -static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 }; - -static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm) -{ - int i; - int pair_lm; - - pair_lm = lm_right_pair[lm]; - if (pair_lm < 0) - return -EINVAL; - - for (i = 0; i < mdp5_kms->num_hwmixers; i++) { - struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i]; - - if (mixer->lm == pair_lm) - return mixer->idx; - } - - return -1; -} - -int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, - uint32_t caps, struct mdp5_hw_mixer **mixer, - struct mdp5_hw_mixer **r_mixer) -{ - struct msm_drm_private *priv = s->dev->dev_private; - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); - struct mdp5_state *state = mdp5_get_state(s); - struct mdp5_hw_mixer_state *new_state; - int i; - - if (IS_ERR(state)) - return PTR_ERR(state); - - new_state = &state->hwmixer; - - for (i = 0; i < mdp5_kms->num_hwmixers; i++) { - struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i]; - - /* - * skip if already in-use by a different CRTC. If there is a - * mixer already assigned to this CRTC, it means this call is - * a request to get an additional right mixer. Assume that the - * existing mixer is the 'left' one, and try to see if we can - * get its corresponding 'right' pair. - */ - if (new_state->hwmixer_to_crtc[cur->idx] && - new_state->hwmixer_to_crtc[cur->idx] != crtc) - continue; - - /* skip if doesn't support some required caps: */ - if (caps & ~cur->caps) - continue; - - if (r_mixer) { - int pair_idx; - - pair_idx = get_right_pair_idx(mdp5_kms, cur->lm); - if (pair_idx < 0) - return -EINVAL; - - if (new_state->hwmixer_to_crtc[pair_idx]) - continue; - - *r_mixer = mdp5_kms->hwmixers[pair_idx]; - } - - /* - * prefer a pair-able LM over an unpairable one. We can - * switch the CRTC from Normal mode to Source Split mode - * without requiring a full modeset if we had already - * assigned this CRTC a pair-able LM. - * - * TODO: There will be assignment sequences which would - * result in the CRTC requiring a full modeset, even - * if we have the LM resources to prevent it. For a platform - * with a few displays, we don't run out of pair-able LMs - * so easily. For now, ignore the possibility of requiring - * a full modeset. 
- */ - if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR) - *mixer = cur; - } - - if (!(*mixer)) - return -ENOMEM; - - if (r_mixer && !(*r_mixer)) - return -ENOMEM; - - DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name); - - new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc; - if (r_mixer) { - DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm, - crtc->name); - new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc; - } - - return 0; -} - -void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer) -{ - struct mdp5_state *state = mdp5_get_state(s); - struct mdp5_hw_mixer_state *new_state = &state->hwmixer; - - if (!mixer) - return; - - if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx])) - return; - - DBG("%s: release from crtc %s", mixer->name, - new_state->hwmixer_to_crtc[mixer->idx]->name); - - new_state->hwmixer_to_crtc[mixer->idx] = NULL; -} - -void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer) -{ - kfree(mixer); -} - -static const char * const mixer_names[] = { - "LM0", "LM1", "LM2", "LM3", "LM4", "LM5", -}; - -struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm) -{ - struct mdp5_hw_mixer *mixer; - - mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); - if (!mixer) - return ERR_PTR(-ENOMEM); - - mixer->name = mixer_names[lm->id]; - mixer->lm = lm->id; - mixer->caps = lm->caps; - mixer->pp = lm->pp; - mixer->dspp = lm->dspp; - mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id); - - return mixer; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h deleted file mode 100644 index 9be94f567fbd..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (C) 2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . 
- */ - -#ifndef __MDP5_LM_H__ -#define __MDP5_LM_H__ - -/* represents a hw Layer Mixer, one (or more) is dynamically assigned to a crtc */ -struct mdp5_hw_mixer { - int idx; - - const char *name; - - int lm; /* the LM instance # */ - uint32_t caps; - int pp; - int dspp; - - uint32_t flush_mask; /* used to commit LM registers */ -}; - -/* global atomic state of assignment between CRTCs and Layer Mixers: */ -struct mdp5_hw_mixer_state { - struct drm_crtc *hwmixer_to_crtc[8]; -}; - -struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm); -void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm); -int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, - uint32_t caps, struct mdp5_hw_mixer **mixer, - struct mdp5_hw_mixer **r_mixer); -void mdp5_mixer_release(struct drm_atomic_state *s, - struct mdp5_hw_mixer *mixer); - -#endif /* __MDP5_LM_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c deleted file mode 100644 index ff52c49095f9..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright (C) 2016 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include "mdp5_kms.h" - -int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, - uint32_t caps, uint32_t blkcfg, - struct mdp5_hw_pipe **hwpipe, - struct mdp5_hw_pipe **r_hwpipe) -{ - struct msm_drm_private *priv = s->dev->dev_private; - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); - struct mdp5_state *state; - struct mdp5_hw_pipe_state *old_state, *new_state; - int i, j; - - state = mdp5_get_state(s); - if (IS_ERR(state)) - return PTR_ERR(state); - - /* grab old_state after mdp5_get_state(), since now we hold lock: */ - old_state = &mdp5_kms->state->hwpipe; - new_state = &state->hwpipe; - - for (i = 0; i < mdp5_kms->num_hwpipes; i++) { - struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i]; - - /* skip if already in-use.. 
check both new and old state, - * since we cannot immediately re-use a pipe that is - * released in the current update in some cases: - * (1) mdp5 can have SMP (non-double-buffered) - * (2) hw pipe previously assigned to different CRTC - * (vblanks might not be aligned) - */ - if (new_state->hwpipe_to_plane[cur->idx] || - old_state->hwpipe_to_plane[cur->idx]) - continue; - - /* skip if doesn't support some required caps: */ - if (caps & ~cur->caps) - continue; - - /* - * don't assign a cursor pipe to a plane that isn't going to - * be used as a cursor - */ - if (cur->caps & MDP_PIPE_CAP_CURSOR && - plane->type != DRM_PLANE_TYPE_CURSOR) - continue; - - /* possible candidate, take the one with the - * fewest unneeded caps bits set: - */ - if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) < - hweight_long((*hwpipe)->caps & ~caps))) { - bool r_found = false; - - if (r_hwpipe) { - for (j = i + 1; j < mdp5_kms->num_hwpipes; - j++) { - struct mdp5_hw_pipe *r_cur = - mdp5_kms->hwpipes[j]; - - /* reject different types of hwpipes */ - if (r_cur->caps != cur->caps) - continue; - - /* respect priority, eg. VIG0 > VIG1 */ - if (cur->pipe > r_cur->pipe) - continue; - - *r_hwpipe = r_cur; - r_found = true; - break; - } - } - - if (!r_hwpipe || r_found) - *hwpipe = cur; - } - } - - if (!(*hwpipe)) - return -ENOMEM; - - if (r_hwpipe && !(*r_hwpipe)) - return -ENOMEM; - - if (mdp5_kms->smp) { - int ret; - - /* We don't support SMP and 2 hwpipes/plane together */ - WARN_ON(r_hwpipe); - - DBG("%s: alloc SMP blocks", (*hwpipe)->name); - ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp, - (*hwpipe)->pipe, blkcfg); - if (ret) - return -ENOMEM; - - (*hwpipe)->blkcfg = blkcfg; - } - - DBG("%s: assign to plane %s for caps %x", - (*hwpipe)->name, plane->name, caps); - new_state->hwpipe_to_plane[(*hwpipe)->idx] = plane; - - if (r_hwpipe) { - DBG("%s: assign to right of plane %s for caps %x", - (*r_hwpipe)->name, plane->name, caps); - new_state->hwpipe_to_plane[(*r_hwpipe)->idx] = plane; - } - - return 0; -} - -void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe) -{ - struct msm_drm_private *priv = s->dev->dev_private; - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); - struct mdp5_state *state = mdp5_get_state(s); - struct mdp5_hw_pipe_state *new_state = &state->hwpipe; - - if (!hwpipe) - return; - - if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx])) - return; - - DBG("%s: release from plane %s", hwpipe->name, - new_state->hwpipe_to_plane[hwpipe->idx]->name); - - if (mdp5_kms->smp) { - DBG("%s: free SMP blocks", hwpipe->name); - mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe); - } - - new_state->hwpipe_to_plane[hwpipe->idx] = NULL; -} - -void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe) -{ - kfree(hwpipe); -} - -struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, - uint32_t reg_offset, uint32_t caps) -{ - struct mdp5_hw_pipe *hwpipe; - - hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL); - if (!hwpipe) - return ERR_PTR(-ENOMEM); - - hwpipe->name = pipe2name(pipe); - hwpipe->pipe = pipe; - hwpipe->reg_offset = reg_offset; - hwpipe->caps = caps; - hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe); - - return hwpipe; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h deleted file mode 100644 index bb2b0ac7aa2b..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (C) 2016 Red Hat - * Author: Rob Clark - * - * This program is free software; you can 
redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#ifndef __MDP5_PIPE_H__ -#define __MDP5_PIPE_H__ - -/* TODO: Add SSPP_MAX in mdp5.xml.h */ -#define SSPP_MAX (SSPP_CURSOR1 + 1) - -/* represents a hw pipe, which is dynamically assigned to a plane */ -struct mdp5_hw_pipe { - int idx; - - const char *name; - enum mdp5_pipe pipe; - - uint32_t reg_offset; - uint32_t caps; - - uint32_t flush_mask; /* used to commit pipe registers */ - - /* number of smp blocks per plane, ie: - * nblks_y | (nblks_u << 8) | (nblks_v << 16) - */ - uint32_t blkcfg; -}; - -/* global atomic state of assignment between pipes and planes: */ -struct mdp5_hw_pipe_state { - struct drm_plane *hwpipe_to_plane[SSPP_MAX]; -}; - -int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, - uint32_t caps, uint32_t blkcfg, - struct mdp5_hw_pipe **hwpipe, - struct mdp5_hw_pipe **r_hwpipe); -void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe); - -struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, - uint32_t reg_offset, uint32_t caps); -void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe); - -#endif /* __MDP5_PIPE_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c deleted file mode 100644 index 98d4d7331767..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ /dev/null @@ -1,1137 +0,0 @@ -/* - * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved. - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . 
- */ - -#include -#include "mdp5_kms.h" - -struct mdp5_plane { - struct drm_plane base; - - uint32_t nformats; - uint32_t formats[32]; -}; -#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) - -static int mdp5_plane_mode_set(struct drm_plane *plane, - struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_rect *src, struct drm_rect *dest); - -static struct mdp5_kms *get_kms(struct drm_plane *plane) -{ - struct msm_drm_private *priv = plane->dev->dev_private; - return to_mdp5_kms(to_mdp_kms(priv->kms)); -} - -static bool plane_enabled(struct drm_plane_state *state) -{ - return state->visible; -} - -static void mdp5_plane_destroy(struct drm_plane *plane) -{ - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - - drm_plane_helper_disable(plane); - drm_plane_cleanup(plane); - - kfree(mdp5_plane); -} - -static void mdp5_plane_install_rotation_property(struct drm_device *dev, - struct drm_plane *plane) -{ - drm_plane_create_rotation_property(plane, - DRM_MODE_ROTATE_0, - DRM_MODE_ROTATE_0 | - DRM_MODE_ROTATE_180 | - DRM_MODE_REFLECT_X | - DRM_MODE_REFLECT_Y); -} - -/* helper to install properties which are common to planes and crtcs */ -static void mdp5_plane_install_properties(struct drm_plane *plane, - struct drm_mode_object *obj) -{ - struct drm_device *dev = plane->dev; - struct msm_drm_private *dev_priv = dev->dev_private; - struct drm_property *prop; - -#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \ - prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \ - if (!prop) { \ - prop = drm_property_##fnc(dev, 0, #name, \ - ##__VA_ARGS__); \ - if (!prop) { \ - dev_warn(dev->dev, \ - "Create property %s failed\n", \ - #name); \ - return; \ - } \ - dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \ - } \ - drm_object_attach_property(&plane->base, prop, init_val); \ - } while (0) - -#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \ - INSTALL_PROPERTY(name, NAME, init_val, \ - create_range, min, max) - -#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \ - INSTALL_PROPERTY(name, NAME, init_val, \ - create_enum, name##_prop_enum_list, \ - ARRAY_SIZE(name##_prop_enum_list)) - - INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1); - - mdp5_plane_install_rotation_property(dev, plane); - -#undef INSTALL_RANGE_PROPERTY -#undef INSTALL_ENUM_PROPERTY -#undef INSTALL_PROPERTY -} - -static int mdp5_plane_atomic_set_property(struct drm_plane *plane, - struct drm_plane_state *state, struct drm_property *property, - uint64_t val) -{ - struct drm_device *dev = plane->dev; - struct mdp5_plane_state *pstate; - struct msm_drm_private *dev_priv = dev->dev_private; - int ret = 0; - - pstate = to_mdp5_plane_state(state); - -#define SET_PROPERTY(name, NAME, type) do { \ - if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \ - pstate->name = (type)val; \ - DBG("Set property %s %d", #name, (type)val); \ - goto done; \ - } \ - } while (0) - - SET_PROPERTY(zpos, ZPOS, uint8_t); - - dev_err(dev->dev, "Invalid property\n"); - ret = -EINVAL; -done: - return ret; -#undef SET_PROPERTY -} - -static int mdp5_plane_atomic_get_property(struct drm_plane *plane, - const struct drm_plane_state *state, - struct drm_property *property, uint64_t *val) -{ - struct drm_device *dev = plane->dev; - struct mdp5_plane_state *pstate; - struct msm_drm_private *dev_priv = dev->dev_private; - int ret = 0; - - pstate = to_mdp5_plane_state(state); - -#define GET_PROPERTY(name, NAME, type) do { \ - if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \ - *val = 
pstate->name; \ - DBG("Get property %s %lld", #name, *val); \ - goto done; \ - } \ - } while (0) - - GET_PROPERTY(zpos, ZPOS, uint8_t); - - dev_err(dev->dev, "Invalid property\n"); - ret = -EINVAL; -done: - return ret; -#undef SET_PROPERTY -} - -static void -mdp5_plane_atomic_print_state(struct drm_printer *p, - const struct drm_plane_state *state) -{ - struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); - struct mdp5_kms *mdp5_kms = get_kms(state->plane); - - drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ? - pstate->hwpipe->name : "(null)"); - if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT) - drm_printf(p, "\tright-hwpipe=%s\n", - pstate->r_hwpipe ? pstate->r_hwpipe->name : - "(null)"); - drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied); - drm_printf(p, "\tzpos=%u\n", pstate->zpos); - drm_printf(p, "\talpha=%u\n", pstate->alpha); - drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); -} - -static void mdp5_plane_reset(struct drm_plane *plane) -{ - struct mdp5_plane_state *mdp5_state; - - if (plane->state && plane->state->fb) - drm_framebuffer_unreference(plane->state->fb); - - kfree(to_mdp5_plane_state(plane->state)); - mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); - - /* assign default blend parameters */ - mdp5_state->alpha = 255; - mdp5_state->premultiplied = 0; - - if (plane->type == DRM_PLANE_TYPE_PRIMARY) - mdp5_state->zpos = STAGE_BASE; - else - mdp5_state->zpos = STAGE0 + drm_plane_index(plane); - - mdp5_state->base.plane = plane; - - plane->state = &mdp5_state->base; -} - -static struct drm_plane_state * -mdp5_plane_duplicate_state(struct drm_plane *plane) -{ - struct mdp5_plane_state *mdp5_state; - - if (WARN_ON(!plane->state)) - return NULL; - - mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), - sizeof(*mdp5_state), GFP_KERNEL); - if (!mdp5_state) - return NULL; - - __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base); - - return &mdp5_state->base; -} - -static void mdp5_plane_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); - - if (state->fb) - drm_framebuffer_unreference(state->fb); - - kfree(pstate); -} - -static const struct drm_plane_funcs mdp5_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = mdp5_plane_destroy, - .atomic_set_property = mdp5_plane_atomic_set_property, - .atomic_get_property = mdp5_plane_atomic_get_property, - .reset = mdp5_plane_reset, - .atomic_duplicate_state = mdp5_plane_duplicate_state, - .atomic_destroy_state = mdp5_plane_destroy_state, - .atomic_print_state = mdp5_plane_atomic_print_state, -}; - -static int mdp5_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - struct mdp5_kms *mdp5_kms = get_kms(plane); - struct msm_kms *kms = &mdp5_kms->base.base; - struct drm_framebuffer *fb = new_state->fb; - - if (!new_state->fb) - return 0; - - DBG("%s: prepare: FB[%u]", plane->name, fb->base.id); - return msm_framebuffer_prepare(fb, kms->aspace); -} - -static void mdp5_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct mdp5_kms *mdp5_kms = get_kms(plane); - struct msm_kms *kms = &mdp5_kms->base.base; - struct drm_framebuffer *fb = old_state->fb; - - if (!fb) - return; - - DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id); - msm_framebuffer_cleanup(fb, kms->aspace); -} - -#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) -static int 
mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, - struct drm_plane_state *state) -{ - struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); - struct drm_plane *plane = state->plane; - struct drm_plane_state *old_state = plane->state; - struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); - bool new_hwpipe = false; - bool need_right_hwpipe = false; - uint32_t max_width, max_height; - bool out_of_bounds = false; - uint32_t caps = 0; - struct drm_rect clip = {}; - int min_scale, max_scale; - int ret; - - DBG("%s: check (%d -> %d)", plane->name, - plane_enabled(old_state), plane_enabled(state)); - - max_width = config->hw->lm.max_width << 16; - max_height = config->hw->lm.max_height << 16; - - /* Make sure source dimensions are within bounds. */ - if (state->src_h > max_height) - out_of_bounds = true; - - if (state->src_w > max_width) { - /* If source split is supported, we can go up to 2x - * the max LM width, but we'd need to stage another - * hwpipe to the right LM. So, the drm_plane would - * consist of 2 hwpipes. - */ - if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT && - (state->src_w <= 2 * max_width)) - need_right_hwpipe = true; - else - out_of_bounds = true; - } - - if (out_of_bounds) { - struct drm_rect src = drm_plane_state_src(state); - DBG("Invalid source size "DRM_RECT_FP_FMT, - DRM_RECT_FP_ARG(&src)); - return -ERANGE; - } - - min_scale = FRAC_16_16(1, 8); - max_scale = FRAC_16_16(8, 1); - - if (crtc_state->enable) - drm_mode_get_hv_timing(&crtc_state->mode, - &clip.x2, &clip.y2); - - ret = drm_atomic_helper_check_plane_state(state, crtc_state, &clip, - min_scale, max_scale, - true, true); - if (ret) - return ret; - - if (plane_enabled(state)) { - unsigned int rotation; - const struct mdp_format *format; - struct mdp5_kms *mdp5_kms = get_kms(plane); - uint32_t blkcfg = 0; - - format = to_mdp_format(msm_framebuffer_format(state->fb)); - if (MDP_FORMAT_IS_YUV(format)) - caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC; - - if (((state->src_w >> 16) != state->crtc_w) || - ((state->src_h >> 16) != state->crtc_h)) - caps |= MDP_PIPE_CAP_SCALE; - - rotation = drm_rotation_simplify(state->rotation, - DRM_MODE_ROTATE_0 | - DRM_MODE_REFLECT_X | - DRM_MODE_REFLECT_Y); - - if (rotation & DRM_MODE_REFLECT_X) - caps |= MDP_PIPE_CAP_HFLIP; - - if (rotation & DRM_MODE_REFLECT_Y) - caps |= MDP_PIPE_CAP_VFLIP; - - if (plane->type == DRM_PLANE_TYPE_CURSOR) - caps |= MDP_PIPE_CAP_CURSOR; - - /* (re)allocate hw pipe if we don't have one or caps-mismatch: */ - if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) - new_hwpipe = true; - - /* - * (re)allocte hw pipe if we're either requesting for 2 hw pipes - * or we're switching from 2 hw pipes to 1 hw pipe because the - * new src_w can be supported by 1 hw pipe itself. - */ - if ((need_right_hwpipe && !mdp5_state->r_hwpipe) || - (!need_right_hwpipe && mdp5_state->r_hwpipe)) - new_hwpipe = true; - - if (mdp5_kms->smp) { - const struct mdp_format *format = - to_mdp_format(msm_framebuffer_format(state->fb)); - - blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format, - state->src_w >> 16, false); - - if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg)) - new_hwpipe = true; - } - - /* (re)assign hwpipe if needed, otherwise keep old one: */ - if (new_hwpipe) { - /* TODO maybe we want to re-assign hwpipe sometimes - * in cases when we no-longer need some caps to make - * it available for other planes? 
- */ - struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe; - struct mdp5_hw_pipe *old_right_hwpipe = - mdp5_state->r_hwpipe; - struct mdp5_hw_pipe *new_hwpipe = NULL; - struct mdp5_hw_pipe *new_right_hwpipe = NULL; - - ret = mdp5_pipe_assign(state->state, plane, caps, - blkcfg, &new_hwpipe, - need_right_hwpipe ? - &new_right_hwpipe : NULL); - if (ret) { - DBG("%s: failed to assign hwpipe(s)!", - plane->name); - return ret; - } - - mdp5_state->hwpipe = new_hwpipe; - if (need_right_hwpipe) - mdp5_state->r_hwpipe = new_right_hwpipe; - else - /* - * set it to NULL so that the driver knows we - * don't have a right hwpipe when committing a - * new state - */ - mdp5_state->r_hwpipe = NULL; - - - mdp5_pipe_release(state->state, old_hwpipe); - mdp5_pipe_release(state->state, old_right_hwpipe); - } - } else { - mdp5_pipe_release(state->state, mdp5_state->hwpipe); - mdp5_pipe_release(state->state, mdp5_state->r_hwpipe); - mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL; - } - - return 0; -} - -static int mdp5_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; - - crtc = state->crtc ? state->crtc : plane->state->crtc; - if (!crtc) - return 0; - - crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); - if (WARN_ON(!crtc_state)) - return -EINVAL; - - return mdp5_plane_atomic_check_with_state(crtc_state, state); -} - -static void mdp5_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct drm_plane_state *state = plane->state; - - DBG("%s: update", plane->name); - - if (plane_enabled(state)) { - int ret; - - ret = mdp5_plane_mode_set(plane, - state->crtc, state->fb, - &state->src, &state->dst); - /* atomic_check should have ensured that this doesn't fail */ - WARN_ON(ret < 0); - } -} - -static int mdp5_plane_atomic_async_check(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); - struct drm_crtc_state *crtc_state; - struct drm_rect clip = {}; - int min_scale, max_scale; - int ret; - - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); - if (WARN_ON(!crtc_state)) - return -EINVAL; - - if (!crtc_state->active) - return -EINVAL; - - mdp5_state = to_mdp5_plane_state(state); - - /* don't use fast path if we don't have a hwpipe allocated yet */ - if (!mdp5_state->hwpipe) - return -EINVAL; - - /* only allow changing of position(crtc x/y or src x/y) in fast path */ - if (plane->state->crtc != state->crtc || - plane->state->src_w != state->src_w || - plane->state->src_h != state->src_h || - plane->state->crtc_w != state->crtc_w || - plane->state->crtc_h != state->crtc_h || - !plane->state->fb || - plane->state->fb != state->fb) - return -EINVAL; - - min_scale = FRAC_16_16(1, 8); - max_scale = FRAC_16_16(8, 1); - - if (crtc_state->enable) - drm_mode_get_hv_timing(&crtc_state->mode, - &clip.x2, &clip.y2); - - ret = drm_atomic_helper_check_plane_state(state, crtc_state, &clip, - min_scale, max_scale, - true, true); - if (ret) - return ret; - - /* - * if the visibility of the plane changes (i.e, if the cursor is - * clipped out completely, we can't take the async path because - * we need to stage/unstage the plane from the Layer Mixer(s). We - * also assign/unassign the hwpipe(s) tied to the plane. We avoid - * taking the fast path for both these reasons. 
- */ - if (state->visible != plane->state->visible) - return -EINVAL; - - return 0; -} - -static void mdp5_plane_atomic_async_update(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - plane->state->src_x = new_state->src_x; - plane->state->src_y = new_state->src_y; - plane->state->crtc_x = new_state->crtc_x; - plane->state->crtc_y = new_state->crtc_y; - - if (plane_enabled(new_state)) { - struct mdp5_ctl *ctl; - struct mdp5_pipeline *pipeline = - mdp5_crtc_get_pipeline(plane->crtc); - int ret; - - ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb, - &new_state->src, &new_state->dst); - WARN_ON(ret < 0); - - ctl = mdp5_crtc_get_ctl(new_state->crtc); - - mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane)); - } - - *to_mdp5_plane_state(plane->state) = - *to_mdp5_plane_state(new_state); -} - -static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { - .prepare_fb = mdp5_plane_prepare_fb, - .cleanup_fb = mdp5_plane_cleanup_fb, - .atomic_check = mdp5_plane_atomic_check, - .atomic_update = mdp5_plane_atomic_update, - .atomic_async_check = mdp5_plane_atomic_async_check, - .atomic_async_update = mdp5_plane_atomic_async_update, -}; - -static void set_scanout_locked(struct mdp5_kms *mdp5_kms, - enum mdp5_pipe pipe, - struct drm_framebuffer *fb) -{ - struct msm_kms *kms = &mdp5_kms->base.base; - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), - MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | - MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe), - MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) | - MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), - msm_framebuffer_iova(fb, kms->aspace, 0)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), - msm_framebuffer_iova(fb, kms->aspace, 1)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), - msm_framebuffer_iova(fb, kms->aspace, 2)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), - msm_framebuffer_iova(fb, kms->aspace, 3)); -} - -/* Note: mdp5_plane->pipe_lock must be locked */ -static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe) -{ - uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) & - ~MDP5_PIPE_OP_MODE_CSC_1_EN; - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value); -} - -/* Note: mdp5_plane->pipe_lock must be locked */ -static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, - struct csc_cfg *csc) -{ - uint32_t i, mode = 0; /* RGB, no CSC */ - uint32_t *matrix; - - if (unlikely(!csc)) - return; - - if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type)) - mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV); - if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type)) - mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV); - mode |= MDP5_PIPE_OP_MODE_CSC_1_EN; - mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode); - - matrix = csc->matrix; - mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe), - MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) | - MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1])); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe), - MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) | - MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3])); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe), - MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) | - MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5])); - mdp5_write(mdp5_kms, 
REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe), - MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) | - MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7])); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe), - MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8])); - - for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) { - uint32_t *pre_clamp = csc->pre_clamp; - uint32_t *post_clamp = csc->post_clamp; - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i), - MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) | - MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i])); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i), - MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) | - MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i])); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i), - MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i])); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i), - MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i])); - } -} - -#define PHASE_STEP_SHIFT 21 -#define DOWN_SCALE_RATIO_MAX 32 /* 2^(26-21) */ - -static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase) -{ - uint32_t unit; - - if (src == 0 || dst == 0) - return -EINVAL; - - /* - * PHASE_STEP_X/Y is coded on 26 bits (25:0), - * where 2^21 represents the unity "1" in fixed-point hardware design. - * This leaves 5 bits for the integer part (downscale case): - * -> maximum downscale ratio = 0b1_1111 = 31 - */ - if (src > (dst * DOWN_SCALE_RATIO_MAX)) - return -EOVERFLOW; - - unit = 1 << PHASE_STEP_SHIFT; - *out_phase = mult_frac(unit, src, dst); - - return 0; -} - -static int calc_scalex_steps(struct drm_plane *plane, - uint32_t pixel_format, uint32_t src, uint32_t dest, - uint32_t phasex_steps[COMP_MAX]) -{ - struct mdp5_kms *mdp5_kms = get_kms(plane); - struct device *dev = mdp5_kms->dev->dev; - uint32_t phasex_step; - unsigned int hsub; - int ret; - - ret = calc_phase_step(src, dest, &phasex_step); - if (ret) { - dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret); - return ret; - } - - hsub = drm_format_horz_chroma_subsampling(pixel_format); - - phasex_steps[COMP_0] = phasex_step; - phasex_steps[COMP_3] = phasex_step; - phasex_steps[COMP_1_2] = phasex_step / hsub; - - return 0; -} - -static int calc_scaley_steps(struct drm_plane *plane, - uint32_t pixel_format, uint32_t src, uint32_t dest, - uint32_t phasey_steps[COMP_MAX]) -{ - struct mdp5_kms *mdp5_kms = get_kms(plane); - struct device *dev = mdp5_kms->dev->dev; - uint32_t phasey_step; - unsigned int vsub; - int ret; - - ret = calc_phase_step(src, dest, &phasey_step); - if (ret) { - dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret); - return ret; - } - - vsub = drm_format_vert_chroma_subsampling(pixel_format); - - phasey_steps[COMP_0] = phasey_step; - phasey_steps[COMP_3] = phasey_step; - phasey_steps[COMP_1_2] = phasey_step / vsub; - - return 0; -} - -static uint32_t get_scale_config(const struct mdp_format *format, - uint32_t src, uint32_t dst, bool horz) -{ - bool scaling = format->is_yuv ? true : (src != dst); - uint32_t sub, pix_fmt = format->base.pixel_format; - uint32_t ya_filter, uv_filter; - bool yuv = format->is_yuv; - - if (!scaling) - return 0; - - if (yuv) { - sub = horz ? drm_format_horz_chroma_subsampling(pix_fmt) : - drm_format_vert_chroma_subsampling(pix_fmt); - uv_filter = ((src / sub) <= dst) ? - SCALE_FILTER_BIL : SCALE_FILTER_PCMN; - } - ya_filter = (src <= dst) ? 
SCALE_FILTER_BIL : SCALE_FILTER_PCMN; - - if (horz) - return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | - MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) | - MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) | - COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter)); - else - return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | - MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) | - COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter)); -} - -static void calc_pixel_ext(const struct mdp_format *format, - uint32_t src, uint32_t dst, uint32_t phase_step[2], - int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX], - bool horz) -{ - bool scaling = format->is_yuv ? true : (src != dst); - int i; - - /* - * Note: - * We assume here that: - * 1. PCMN filter is used for downscale - * 2. bilinear filter is used for upscale - * 3. we are in a single pipe configuration - */ - - for (i = 0; i < COMP_MAX; i++) { - pix_ext_edge1[i] = 0; - pix_ext_edge2[i] = scaling ? 1 : 0; - } -} - -static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, - const struct mdp_format *format, - uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX], - uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX]) -{ - uint32_t pix_fmt = format->base.pixel_format; - uint32_t lr, tb, req; - int i; - - for (i = 0; i < COMP_MAX; i++) { - uint32_t roi_w = src_w; - uint32_t roi_h = src_h; - - if (format->is_yuv && i == COMP_1_2) { - roi_w /= drm_format_horz_chroma_subsampling(pix_fmt); - roi_h /= drm_format_vert_chroma_subsampling(pix_fmt); - } - - lr = (pe_left[i] >= 0) ? - MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) : - MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]); - - lr |= (pe_right[i] >= 0) ? - MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) : - MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]); - - tb = (pe_top[i] >= 0) ? - MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) : - MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]); - - tb |= (pe_bottom[i] >= 0) ? 
- MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) : - MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]); - - req = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w + - pe_left[i] + pe_right[i]); - - req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h + - pe_top[i] + pe_bottom[i]); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req); - - DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i, - FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT), - FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT), - FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF), - FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF), - FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT)); - - DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i, - FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT), - FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT), - FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF), - FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF), - FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM)); - } -} - -struct pixel_ext { - int left[COMP_MAX]; - int right[COMP_MAX]; - int top[COMP_MAX]; - int bottom[COMP_MAX]; -}; - -struct phase_step { - u32 x[COMP_MAX]; - u32 y[COMP_MAX]; -}; - -static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms, - struct mdp5_hw_pipe *hwpipe, - struct drm_framebuffer *fb, - struct phase_step *step, - struct pixel_ext *pe, - u32 scale_config, u32 hdecm, u32 vdecm, - bool hflip, bool vflip, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - u32 src_img_w, u32 src_img_h, - u32 src_x, u32 src_y, - u32 src_w, u32 src_h) -{ - enum mdp5_pipe pipe = hwpipe->pipe; - bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT; - const struct mdp_format *format = - to_mdp_format(msm_framebuffer_format(fb)); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), - MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) | - MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h)); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe), - MDP5_PIPE_SRC_SIZE_WIDTH(src_w) | - MDP5_PIPE_SRC_SIZE_HEIGHT(src_h)); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe), - MDP5_PIPE_SRC_XY_X(src_x) | - MDP5_PIPE_SRC_XY_Y(src_y)); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe), - MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) | - MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h)); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe), - MDP5_PIPE_OUT_XY_X(crtc_x) | - MDP5_PIPE_OUT_XY_Y(crtc_y)); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), - MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | - MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | - MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | - MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | - COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) | - MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | - MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | - COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | - MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) | - MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), - MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | - MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | - MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | - MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), - (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) | - (vflip ? 
MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) | - COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) | - MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); - - /* not using secure mode: */ - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); - - if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) - mdp5_write_pixel_ext(mdp5_kms, pipe, format, - src_w, pe->left, pe->right, - src_h, pe->top, pe->bottom); - - if (hwpipe->caps & MDP_PIPE_CAP_SCALE) { - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), - step->x[COMP_0]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), - step->y[COMP_0]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), - step->x[COMP_1_2]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), - step->y[COMP_1_2]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), - MDP5_PIPE_DECIMATION_VERT(vdecm) | - MDP5_PIPE_DECIMATION_HORZ(hdecm)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), - scale_config); - } - - if (hwpipe->caps & MDP_PIPE_CAP_CSC) { - if (MDP_FORMAT_IS_YUV(format)) - csc_enable(mdp5_kms, pipe, - mdp_get_default_csc_cfg(CSC_YUV2RGB)); - else - csc_disable(mdp5_kms, pipe); - } - - set_scanout_locked(mdp5_kms, pipe, fb); -} - -static int mdp5_plane_mode_set(struct drm_plane *plane, - struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_rect *src, struct drm_rect *dest) -{ - struct drm_plane_state *pstate = plane->state; - struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; - struct mdp5_kms *mdp5_kms = get_kms(plane); - enum mdp5_pipe pipe = hwpipe->pipe; - struct mdp5_hw_pipe *right_hwpipe; - const struct mdp_format *format; - uint32_t nplanes, config = 0; - struct phase_step step = { { 0 } }; - struct pixel_ext pe = { { 0 } }; - uint32_t hdecm = 0, vdecm = 0; - uint32_t pix_format; - unsigned int rotation; - bool vflip, hflip; - int crtc_x, crtc_y; - unsigned int crtc_w, crtc_h; - uint32_t src_x, src_y; - uint32_t src_w, src_h; - uint32_t src_img_w, src_img_h; - int ret; - - nplanes = fb->format->num_planes; - - /* bad formats should already be rejected: */ - if (WARN_ON(nplanes > pipe2nclients(pipe))) - return -EINVAL; - - format = to_mdp_format(msm_framebuffer_format(fb)); - pix_format = format->base.pixel_format; - - src_x = src->x1; - src_y = src->y1; - src_w = drm_rect_width(src); - src_h = drm_rect_height(src); - - crtc_x = dest->x1; - crtc_y = dest->y1; - crtc_w = drm_rect_width(dest); - crtc_h = drm_rect_height(dest); - - /* src values are in Q16 fixed point, convert to integer: */ - src_x = src_x >> 16; - src_y = src_y >> 16; - src_w = src_w >> 16; - src_h = src_h >> 16; - - src_img_w = min(fb->width, src_w); - src_img_h = min(fb->height, src_h); - - DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name, - fb->base.id, src_x, src_y, src_w, src_h, - crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); - - right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe; - if (right_hwpipe) { - /* - * if the plane comprises of 2 hw pipes, assume that the width - * is split equally across them. 
The only parameters that varies - * between the 2 pipes are src_x and crtc_x - */ - crtc_w /= 2; - src_w /= 2; - src_img_w /= 2; - } - - ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x); - if (ret) - return ret; - - ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y); - if (ret) - return ret; - - if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) { - calc_pixel_ext(format, src_w, crtc_w, step.x, - pe.left, pe.right, true); - calc_pixel_ext(format, src_h, crtc_h, step.y, - pe.top, pe.bottom, false); - } - - /* TODO calc hdecm, vdecm */ - - /* SCALE is used to both scale and up-sample chroma components */ - config |= get_scale_config(format, src_w, crtc_w, true); - config |= get_scale_config(format, src_h, crtc_h, false); - DBG("scale config = %x", config); - - rotation = drm_rotation_simplify(pstate->rotation, - DRM_MODE_ROTATE_0 | - DRM_MODE_REFLECT_X | - DRM_MODE_REFLECT_Y); - hflip = !!(rotation & DRM_MODE_REFLECT_X); - vflip = !!(rotation & DRM_MODE_REFLECT_Y); - - mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe, - config, hdecm, vdecm, hflip, vflip, - crtc_x, crtc_y, crtc_w, crtc_h, - src_img_w, src_img_h, - src_x, src_y, src_w, src_h); - if (right_hwpipe) - mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe, - config, hdecm, vdecm, hflip, vflip, - crtc_x + crtc_w, crtc_y, crtc_w, crtc_h, - src_img_w, src_img_h, - src_x + src_w, src_y, src_w, src_h); - - plane->fb = fb; - - return ret; -} - -/* - * Use this func and the one below only after the atomic state has been - * successfully swapped - */ -enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) -{ - struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); - - if (WARN_ON(!pstate->hwpipe)) - return SSPP_NONE; - - return pstate->hwpipe->pipe; -} - -enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane) -{ - struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); - - if (!pstate->r_hwpipe) - return SSPP_NONE; - - return pstate->r_hwpipe->pipe; -} - -uint32_t mdp5_plane_get_flush(struct drm_plane *plane) -{ - struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); - u32 mask; - - if (WARN_ON(!pstate->hwpipe)) - return 0; - - mask = pstate->hwpipe->flush_mask; - - if (pstate->r_hwpipe) - mask |= pstate->r_hwpipe->flush_mask; - - return mask; -} - -/* initialize plane */ -struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum drm_plane_type type) -{ - struct drm_plane *plane = NULL; - struct mdp5_plane *mdp5_plane; - int ret; - - mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); - if (!mdp5_plane) { - ret = -ENOMEM; - goto fail; - } - - plane = &mdp5_plane->base; - - mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, - ARRAY_SIZE(mdp5_plane->formats), false); - - ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, - mdp5_plane->formats, mdp5_plane->nformats, - NULL, type, NULL); - if (ret) - goto fail; - - drm_plane_helper_add(plane, &mdp5_plane_helper_funcs); - - mdp5_plane_install_properties(plane, &plane->base); - - return plane; - -fail: - if (plane) - mdp5_plane_destroy(plane); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c deleted file mode 100644 index ae4983d9d0a5..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ /dev/null @@ -1,411 +0,0 @@ -/* - * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
- * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - - -#include "mdp5_kms.h" -#include "mdp5_smp.h" - - -struct mdp5_smp { - struct drm_device *dev; - - uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */ - - int blk_cnt; - int blk_size; - - /* register cache */ - u32 alloc_w[22]; - u32 alloc_r[22]; - u32 pipe_reqprio_fifo_wm0[SSPP_MAX]; - u32 pipe_reqprio_fifo_wm1[SSPP_MAX]; - u32 pipe_reqprio_fifo_wm2[SSPP_MAX]; -}; - -static inline -struct mdp5_kms *get_kms(struct mdp5_smp *smp) -{ - struct msm_drm_private *priv = smp->dev->dev_private; - - return to_mdp5_kms(to_mdp_kms(priv->kms)); -} - -static inline u32 pipe2client(enum mdp5_pipe pipe, int plane) -{ -#define CID_UNUSED 0 - - if (WARN_ON(plane >= pipe2nclients(pipe))) - return CID_UNUSED; - - /* - * Note on SMP clients: - * For ViG pipes, fetch Y/Cr/Cb-components clients are always - * consecutive, and in that order. - * - * e.g.: - * if mdp5_cfg->smp.clients[SSPP_VIG0] = N, - * Y plane's client ID is N - * Cr plane's client ID is N + 1 - * Cb plane's client ID is N + 2 - */ - - return mdp5_cfg->smp.clients[pipe] + plane; -} - -/* allocate blocks for the specified request: */ -static int smp_request_block(struct mdp5_smp *smp, - struct mdp5_smp_state *state, - u32 cid, int nblks) -{ - void *cs = state->client_state[cid]; - int i, avail, cnt = smp->blk_cnt; - uint8_t reserved; - - /* we shouldn't be requesting blocks for an in-use client: */ - WARN_ON(bitmap_weight(cs, cnt) > 0); - - reserved = smp->reserved[cid]; - - if (reserved) { - nblks = max(0, nblks - reserved); - DBG("%d MMBs allocated (%d reserved)", nblks, reserved); - } - - avail = cnt - bitmap_weight(state->state, cnt); - if (nblks > avail) { - dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n", - nblks, avail); - return -ENOSPC; - } - - for (i = 0; i < nblks; i++) { - int blk = find_first_zero_bit(state->state, cnt); - set_bit(blk, cs); - set_bit(blk, state->state); - } - - return 0; -} - -static void set_fifo_thresholds(struct mdp5_smp *smp, - enum mdp5_pipe pipe, int nblks) -{ - u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE); - u32 val; - - /* 1/4 of SMP pool that is being fetched */ - val = (nblks * smp_entries_per_blk) / 4; - - smp->pipe_reqprio_fifo_wm0[pipe] = val * 1; - smp->pipe_reqprio_fifo_wm1[pipe] = val * 2; - smp->pipe_reqprio_fifo_wm2[pipe] = val * 3; -} - -/* - * NOTE: looks like if horizontal decimation is used (if we supported that) - * then the width used to calculate SMP block requirements is the post- - * decimated width. Ie. SMP buffering sits downstream of decimation (which - * presumably happens during the dma from scanout buffer). 
- */ -uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, - const struct mdp_format *format, - u32 width, bool hdecim) -{ - struct mdp5_kms *mdp5_kms = get_kms(smp); - int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); - int i, hsub, nplanes, nlines; - u32 fmt = format->base.pixel_format; - uint32_t blkcfg = 0; - - nplanes = drm_format_num_planes(fmt); - hsub = drm_format_horz_chroma_subsampling(fmt); - - /* different if BWC (compressed framebuffer?) enabled: */ - nlines = 2; - - /* Newer MDPs have split/packing logic, which fetches sub-sampled - * U and V components (splits them from Y if necessary) and packs - * them together, writes to SMP using a single client. - */ - if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { - fmt = DRM_FORMAT_NV24; - nplanes = 2; - - /* if decimation is enabled, HW decimates less on the - * sub sampled chroma components - */ - if (hdecim && (hsub > 1)) - hsub = 1; - } - - for (i = 0; i < nplanes; i++) { - int n, fetch_stride, cpp; - - cpp = drm_format_plane_cpp(fmt, i); - fetch_stride = width * cpp / (i ? hsub : 1); - - n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size); - - /* for hw rev v1.00 */ - if (rev == 0) - n = roundup_pow_of_two(n); - - blkcfg |= (n << (8 * i)); - } - - return blkcfg; -} - -int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, - enum mdp5_pipe pipe, uint32_t blkcfg) -{ - struct mdp5_kms *mdp5_kms = get_kms(smp); - struct drm_device *dev = mdp5_kms->dev; - int i, ret; - - for (i = 0; i < pipe2nclients(pipe); i++) { - u32 cid = pipe2client(pipe, i); - int n = blkcfg & 0xff; - - if (!n) - continue; - - DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n); - ret = smp_request_block(smp, state, cid, n); - if (ret) { - dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n", - n, ret); - return ret; - } - - blkcfg >>= 8; - } - - state->assigned |= (1 << pipe); - - return 0; -} - -/* Release SMP blocks for all clients of the pipe */ -void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, - enum mdp5_pipe pipe) -{ - int i; - int cnt = smp->blk_cnt; - - for (i = 0; i < pipe2nclients(pipe); i++) { - u32 cid = pipe2client(pipe, i); - void *cs = state->client_state[cid]; - - /* update global state: */ - bitmap_andnot(state->state, state->state, cs, cnt); - - /* clear client's state */ - bitmap_zero(cs, cnt); - } - - state->released |= (1 << pipe); -} - -/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to - * happen after scanout completes. 
- */ -static unsigned update_smp_state(struct mdp5_smp *smp, - u32 cid, mdp5_smp_state_t *assigned) -{ - int cnt = smp->blk_cnt; - unsigned nblks = 0; - u32 blk, val; - - for_each_set_bit(blk, *assigned, cnt) { - int idx = blk / 3; - int fld = blk % 3; - - val = smp->alloc_w[idx]; - - switch (fld) { - case 0: - val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; - val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid); - break; - case 1: - val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; - val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid); - break; - case 2: - val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; - val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid); - break; - } - - smp->alloc_w[idx] = val; - smp->alloc_r[idx] = val; - - nblks++; - } - - return nblks; -} - -static void write_smp_alloc_regs(struct mdp5_smp *smp) -{ - struct mdp5_kms *mdp5_kms = get_kms(smp); - int i, num_regs; - - num_regs = smp->blk_cnt / 3 + 1; - - for (i = 0; i < num_regs; i++) { - mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i), - smp->alloc_w[i]); - mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i), - smp->alloc_r[i]); - } -} - -static void write_smp_fifo_regs(struct mdp5_smp *smp) -{ - struct mdp5_kms *mdp5_kms = get_kms(smp); - int i; - - for (i = 0; i < mdp5_kms->num_hwpipes; i++) { - struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; - enum mdp5_pipe pipe = hwpipe->pipe; - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), - smp->pipe_reqprio_fifo_wm0[pipe]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), - smp->pipe_reqprio_fifo_wm1[pipe]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), - smp->pipe_reqprio_fifo_wm2[pipe]); - } -} - -void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) -{ - enum mdp5_pipe pipe; - - for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) { - unsigned i, nblks = 0; - - for (i = 0; i < pipe2nclients(pipe); i++) { - u32 cid = pipe2client(pipe, i); - void *cs = state->client_state[cid]; - - nblks += update_smp_state(smp, cid, cs); - - DBG("assign %s:%u, %u blks", - pipe2name(pipe), i, nblks); - } - - set_fifo_thresholds(smp, pipe, nblks); - } - - write_smp_alloc_regs(smp); - write_smp_fifo_regs(smp); - - state->assigned = 0; -} - -void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) -{ - enum mdp5_pipe pipe; - - for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) { - DBG("release %s", pipe2name(pipe)); - set_fifo_thresholds(smp, pipe, 0); - } - - write_smp_fifo_regs(smp); - - state->released = 0; -} - -void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p) -{ - struct mdp5_kms *mdp5_kms = get_kms(smp); - struct mdp5_hw_pipe_state *hwpstate; - struct mdp5_smp_state *state; - int total = 0, i, j; - - drm_printf(p, "name\tinuse\tplane\n"); - drm_printf(p, "----\t-----\t-----\n"); - - if (drm_can_sleep()) - drm_modeset_lock(&mdp5_kms->state_lock, NULL); - - /* grab these *after* we hold the state_lock */ - hwpstate = &mdp5_kms->state->hwpipe; - state = &mdp5_kms->state->smp; - - for (i = 0; i < mdp5_kms->num_hwpipes; i++) { - struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; - struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx]; - enum mdp5_pipe pipe = hwpipe->pipe; - for (j = 0; j < pipe2nclients(pipe); j++) { - u32 cid = pipe2client(pipe, j); - void *cs = state->client_state[cid]; - int inuse = bitmap_weight(cs, smp->blk_cnt); - - drm_printf(p, "%s:%d\t%d\t%s\n", - pipe2name(pipe), j, inuse, - plane ? 
plane->name : NULL); - - total += inuse; - } - } - - drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt); - drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt - - bitmap_weight(state->state, smp->blk_cnt)); - - if (drm_can_sleep()) - drm_modeset_unlock(&mdp5_kms->state_lock); -} - -void mdp5_smp_destroy(struct mdp5_smp *smp) -{ - kfree(smp); -} - -struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg) -{ - struct mdp5_smp_state *state = &mdp5_kms->state->smp; - struct mdp5_smp *smp = NULL; - int ret; - - smp = kzalloc(sizeof(*smp), GFP_KERNEL); - if (unlikely(!smp)) { - ret = -ENOMEM; - goto fail; - } - - smp->dev = mdp5_kms->dev; - smp->blk_cnt = cfg->mmb_count; - smp->blk_size = cfg->mmb_size; - - /* statically tied MMBs cannot be re-allocated: */ - bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt); - memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved)); - - return smp; -fail: - if (smp) - mdp5_smp_destroy(smp); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h deleted file mode 100644 index b41d0448fbe8..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2014, The Linux Foundation. All rights reserved. - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#ifndef __MDP5_SMP_H__ -#define __MDP5_SMP_H__ - -#include - -#include "msm_drv.h" - -/* - * SMP - Shared Memory Pool: - * - * SMP blocks are shared between all the clients, where each plane in - * a scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on - * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR. - * - * Based on the size of the attached scanout buffer, a certain # of - * blocks must be allocated to that client out of the shared pool. - * - * In some hw, some blocks are statically allocated for certain pipes - * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). - * - * - * Atomic SMP State: - * - * On atomic updates that modify SMP configuration, the state is cloned - * (copied) and modified. For test-only, or in cases where atomic - * update fails (or if we hit ww_mutex deadlock/backoff condition) the - * new state is simply thrown away. - * - * Because the SMP registers are not double buffered, updates are a - * two step process: - * - * 1) in _prepare_commit() we configure things (via read-modify-write) - * for the newly assigned pipes, so we don't take away blocks - * assigned to pipes that are still scanning out - * 2) in _complete_commit(), after vblank/etc, we clear things for the - * released clients, since at that point old pipes are no longer - * scanning out. 
- */ -struct mdp5_smp_state { - /* global state of what blocks are in use: */ - mdp5_smp_state_t state; - - /* per client state of what blocks they are using: */ - mdp5_smp_state_t client_state[MAX_CLIENTS]; - - /* assigned pipes (hw updated at _prepare_commit()): */ - unsigned long assigned; - - /* released pipes (hw updated at _complete_commit()): */ - unsigned long released; -}; - -struct mdp5_kms; -struct mdp5_smp; - -/* - * SMP module prototypes: - * mdp5_smp_init() returns a SMP @handler, - * which is then used to call the other mdp5_smp_*(handler, ...) functions. - */ - -struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, - const struct mdp5_smp_block *cfg); -void mdp5_smp_destroy(struct mdp5_smp *smp); - -void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p); - -uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, - const struct mdp_format *format, - u32 width, bool hdecim); - -int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, - enum mdp5_pipe pipe, uint32_t blkcfg); -void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, - enum mdp5_pipe pipe); - -void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state); -void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state); - -#endif /* __MDP5_SMP_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h deleted file mode 100644 index 1494c407be44..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h +++ /dev/null @@ -1,104 +0,0 @@ -#ifndef MDP_COMMON_XML -#define MDP_COMMON_XML - -/* Autogenerated file, DO NOT EDIT manually! - -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27) - -Copyright (C) 2013-2017 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the 
following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ - - -enum mdp_chroma_samp_type { - CHROMA_FULL = 0, - CHROMA_H2V1 = 1, - CHROMA_H1V2 = 2, - CHROMA_420 = 3, -}; - -enum mdp_fetch_type { - MDP_PLANE_INTERLEAVED = 0, - MDP_PLANE_PLANAR = 1, - MDP_PLANE_PSEUDO_PLANAR = 2, -}; - -enum mdp_mixer_stage_id { - STAGE_UNUSED = 0, - STAGE_BASE = 1, - STAGE0 = 2, - STAGE1 = 3, - STAGE2 = 4, - STAGE3 = 5, - STAGE4 = 6, - STAGE5 = 7, - STAGE6 = 8, - STAGE_MAX = 8, -}; - -enum mdp_alpha_type { - FG_CONST = 0, - BG_CONST = 1, - FG_PIXEL = 2, - BG_PIXEL = 3, -}; - -enum mdp_component_type { - COMP_0 = 0, - COMP_1_2 = 1, - COMP_3 = 2, - COMP_MAX = 3, -}; - -enum mdp_bpc { - BPC1 = 0, - BPC5 = 1, - BPC6 = 2, - BPC8 = 3, -}; - -enum mdp_bpc_alpha { - BPC1A = 0, - BPC4A = 1, - BPC6A = 2, - BPC8A = 3, -}; - - -#endif /* MDP_COMMON_XML */ diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c deleted file mode 100644 index b4a8aa4490ee..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp_format.c +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright (c) 2014 The Linux Foundation. All rights reserved. - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . 
- */ - - -#include "msm_drv.h" -#include "mdp_kms.h" - -static struct csc_cfg csc_convert[CSC_MAX] = { - [CSC_RGB2RGB] = { - .type = CSC_RGB2RGB, - .matrix = { - 0x0200, 0x0000, 0x0000, - 0x0000, 0x0200, 0x0000, - 0x0000, 0x0000, 0x0200 - }, - .pre_bias = { 0x0, 0x0, 0x0 }, - .post_bias = { 0x0, 0x0, 0x0 }, - .pre_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff }, - .post_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff }, - }, - [CSC_YUV2RGB] = { - .type = CSC_YUV2RGB, - .matrix = { - 0x0254, 0x0000, 0x0331, - 0x0254, 0xff37, 0xfe60, - 0x0254, 0x0409, 0x0000 - }, - .pre_bias = { 0xfff0, 0xff80, 0xff80 }, - .post_bias = { 0x00, 0x00, 0x00 }, - .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, - .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, - }, - [CSC_RGB2YUV] = { - .type = CSC_RGB2YUV, - .matrix = { - 0x0083, 0x0102, 0x0032, - 0x1fb5, 0x1f6c, 0x00e1, - 0x00e1, 0x1f45, 0x1fdc - }, - .pre_bias = { 0x00, 0x00, 0x00 }, - .post_bias = { 0x10, 0x80, 0x80 }, - .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, - .post_clamp = { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0 }, - }, - [CSC_YUV2YUV] = { - .type = CSC_YUV2YUV, - .matrix = { - 0x0200, 0x0000, 0x0000, - 0x0000, 0x0200, 0x0000, - 0x0000, 0x0000, 0x0200 - }, - .pre_bias = { 0x00, 0x00, 0x00 }, - .post_bias = { 0x00, 0x00, 0x00 }, - .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, - .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, - }, -}; - -#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \ - .base = { .pixel_format = DRM_FORMAT_ ## name }, \ - .bpc_a = BPC ## a ## A, \ - .bpc_r = BPC ## r, \ - .bpc_g = BPC ## g, \ - .bpc_b = BPC ## b, \ - .unpack = { e0, e1, e2, e3 }, \ - .alpha_enable = alpha, \ - .unpack_tight = tight, \ - .cpp = c, \ - .unpack_count = cnt, \ - .fetch_type = fp, \ - .chroma_sample = cs, \ - .is_yuv = yuv, \ -} - -#define BPC0A 0 - -/* - * Note: Keep RGB formats 1st, followed by YUV formats to avoid breaking - * mdp_get_rgb_formats()'s implementation. - */ -static const struct mdp_format formats[] = { - /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... 
*/ - FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - - /* --- RGB formats above / YUV formats below this line --- */ - - /* 2 plane YUV */ - FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), - FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), - FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), - FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), - /* 1 plane YUV */ - FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4, - MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), - FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4, - MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), - FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4, - MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), - FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4, - MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), - /* 3 plane YUV */ - FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1, - MDP_PLANE_PLANAR, CHROMA_420, true), - FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1, - MDP_PLANE_PLANAR, CHROMA_420, true), -}; - -/* - * Note: - * @rgb_only must be set to true, when requesting - * supported formats for RGB pipes. 
- */ -uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats, - bool rgb_only) -{ - uint32_t i; - for (i = 0; i < ARRAY_SIZE(formats); i++) { - const struct mdp_format *f = &formats[i]; - - if (i == max_formats) - break; - - if (rgb_only && MDP_FORMAT_IS_YUV(f)) - break; - - pixel_formats[i] = f->base.pixel_format; - } - - return i; -} - -const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) -{ - int i; - for (i = 0; i < ARRAY_SIZE(formats); i++) { - const struct mdp_format *f = &formats[i]; - if (f->base.pixel_format == format) - return &f->base; - } - return NULL; -} - -struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type) -{ - if (unlikely(WARN_ON(type >= CSC_MAX))) - return NULL; - - return &csc_convert[type]; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c deleted file mode 100644 index 64287304054d..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - - -#include "msm_drv.h" -#include "mdp_kms.h" - - -struct mdp_irq_wait { - struct mdp_irq irq; - int count; -}; - -static DECLARE_WAIT_QUEUE_HEAD(wait_event); - -static DEFINE_SPINLOCK(list_lock); - -static void update_irq(struct mdp_kms *mdp_kms) -{ - struct mdp_irq *irq; - uint32_t irqmask = mdp_kms->vblank_mask; - - assert_spin_locked(&list_lock); - - list_for_each_entry(irq, &mdp_kms->irq_list, node) - irqmask |= irq->irqmask; - - mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask); - mdp_kms->cur_irq_mask = irqmask; -} - -/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder - * link changes, this must be called to figure out the new global irqmask - */ -void mdp_irq_update(struct mdp_kms *mdp_kms) -{ - unsigned long flags; - spin_lock_irqsave(&list_lock, flags); - update_irq(mdp_kms); - spin_unlock_irqrestore(&list_lock, flags); -} - -void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status) -{ - struct mdp_irq *handler, *n; - unsigned long flags; - - spin_lock_irqsave(&list_lock, flags); - mdp_kms->in_irq = true; - list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) { - if (handler->irqmask & status) { - spin_unlock_irqrestore(&list_lock, flags); - handler->irq(handler, handler->irqmask & status); - spin_lock_irqsave(&list_lock, flags); - } - } - mdp_kms->in_irq = false; - update_irq(mdp_kms); - spin_unlock_irqrestore(&list_lock, flags); - -} - -void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable) -{ - unsigned long flags; - - spin_lock_irqsave(&list_lock, flags); - if (enable) - mdp_kms->vblank_mask |= mask; - else - mdp_kms->vblank_mask &= ~mask; - update_irq(mdp_kms); - spin_unlock_irqrestore(&list_lock, flags); -} - -static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus) -{ - struct mdp_irq_wait *wait = - container_of(irq, struct mdp_irq_wait, irq); - wait->count--; - wake_up_all(&wait_event); -} - -void 
mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask) -{ - struct mdp_irq_wait wait = { - .irq = { - .irq = wait_irq, - .irqmask = irqmask, - }, - .count = 1, - }; - mdp_irq_register(mdp_kms, &wait.irq); - wait_event_timeout(wait_event, (wait.count <= 0), - msecs_to_jiffies(100)); - mdp_irq_unregister(mdp_kms, &wait.irq); -} - -void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq) -{ - unsigned long flags; - bool needs_update = false; - - spin_lock_irqsave(&list_lock, flags); - - if (!irq->registered) { - irq->registered = true; - list_add(&irq->node, &mdp_kms->irq_list); - needs_update = !mdp_kms->in_irq; - } - - spin_unlock_irqrestore(&list_lock, flags); - - if (needs_update) - mdp_irq_update(mdp_kms); -} - -void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq) -{ - unsigned long flags; - bool needs_update = false; - - spin_lock_irqsave(&list_lock, flags); - - if (irq->registered) { - irq->registered = false; - list_del(&irq->node); - needs_update = !mdp_kms->in_irq; - } - - spin_unlock_irqrestore(&list_lock, flags); - - if (needs_update) - mdp_irq_update(mdp_kms); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h deleted file mode 100644 index 1185487e7e5e..000000000000 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (C) 2013 Red Hat - * Author: Rob Clark - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#ifndef __MDP_KMS_H__ -#define __MDP_KMS_H__ - -#include -#include -#include - -#include "msm_drv.h" -#include "msm_kms.h" -#include "mdp_common.xml.h" - -struct mdp_kms; - -struct mdp_kms_funcs { - struct msm_kms_funcs base; - void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask, - uint32_t old_irqmask); -}; - -struct mdp_kms { - struct msm_kms base; - - const struct mdp_kms_funcs *funcs; - - /* irq handling: */ - bool in_irq; - struct list_head irq_list; /* list of mdp4_irq */ - uint32_t vblank_mask; /* irq bits set for userspace vblank */ - uint32_t cur_irq_mask; /* current irq mask */ -}; -#define to_mdp_kms(x) container_of(x, struct mdp_kms, base) - -static inline void mdp_kms_init(struct mdp_kms *mdp_kms, - const struct mdp_kms_funcs *funcs) -{ - mdp_kms->funcs = funcs; - INIT_LIST_HEAD(&mdp_kms->irq_list); - msm_kms_init(&mdp_kms->base, &funcs->base); -} - -/* - * irq helpers: - */ - -/* For transiently registering for different MDP irqs that various parts - * of the KMS code need during setup/configuration. These are not - * necessarily the same as what drm_vblank_get/put() are requesting, and - * the hysteresis in drm_vblank_put() is not necessarily desirable for - * internal housekeeping related irq usage. 
- */ -struct mdp_irq { - struct list_head node; - uint32_t irqmask; - bool registered; - void (*irq)(struct mdp_irq *irq, uint32_t irqstatus); -}; - -void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status); -void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable); -void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask); -void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq); -void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq); -void mdp_irq_update(struct mdp_kms *mdp_kms); - -/* - * pixel format helpers: - */ - -struct mdp_format { - struct msm_format base; - enum mdp_bpc bpc_r, bpc_g, bpc_b; - enum mdp_bpc_alpha bpc_a; - uint8_t unpack[4]; - bool alpha_enable, unpack_tight; - uint8_t cpp, unpack_count; - enum mdp_fetch_type fetch_type; - enum mdp_chroma_samp_type chroma_sample; - bool is_yuv; -}; -#define to_mdp_format(x) container_of(x, struct mdp_format, base) -#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) - -uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); -const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); - -/* MDP capabilities */ -#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ -#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */ -#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */ -#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */ - -/* MDP pipe capabilities */ -#define MDP_PIPE_CAP_HFLIP BIT(0) -#define MDP_PIPE_CAP_VFLIP BIT(1) -#define MDP_PIPE_CAP_SCALE BIT(2) -#define MDP_PIPE_CAP_CSC BIT(3) -#define MDP_PIPE_CAP_DECIMATION BIT(4) -#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) -#define MDP_PIPE_CAP_CURSOR BIT(6) - -/* MDP layer mixer caps */ -#define MDP_LM_CAP_DISPLAY BIT(0) -#define MDP_LM_CAP_WB BIT(1) -#define MDP_LM_CAP_PAIR BIT(2) - -static inline bool pipe_supports_yuv(uint32_t pipe_caps) -{ - return (pipe_caps & MDP_PIPE_CAP_SCALE) && - (pipe_caps & MDP_PIPE_CAP_CSC); -} - -enum csc_type { - CSC_RGB2RGB = 0, - CSC_YUV2RGB, - CSC_RGB2YUV, - CSC_YUV2YUV, - CSC_MAX -}; - -struct csc_cfg { - enum csc_type type; - uint32_t matrix[9]; - uint32_t pre_bias[3]; - uint32_t post_bias[3]; - uint32_t pre_clamp[6]; - uint32_t post_clamp[6]; -}; - -struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type); - -#endif /* __MDP_KMS_H__ */ -- cgit v1.2.3 From 94c3e78d873f02379b2a7a7e4cfb020fd81f1bb7 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 14 Feb 2018 10:46:19 -0500 Subject: drm/msm: strip out msm_fence_cb Remnants of pre-dma_fence fencing which got left behind by mistake. 
Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/msm_drv.h | 1 - drivers/gpu/drm/msm/msm_fence.h | 2 -- 2 files changed, 3 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 0a653dd2e618..48ed5b9a8580 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -51,7 +51,6 @@ struct msm_rd_state; struct msm_perf_state; struct msm_gem_submit; struct msm_fence_context; -struct msm_fence_cb; struct msm_gem_address_space; struct msm_gem_vma; diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h index 1aa6a4c6530c..b9fe059091f2 100644 --- a/drivers/gpu/drm/msm/msm_fence.h +++ b/drivers/gpu/drm/msm/msm_fence.h @@ -37,8 +37,6 @@ void msm_fence_context_free(struct msm_fence_context *fctx); int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence, ktime_t *timeout, bool interruptible); -int msm_queue_fence_cb(struct msm_fence_context *fctx, - struct msm_fence_cb *cb, uint32_t fence); void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx); -- cgit v1.2.3 From d71b6bd80d96f15f4ae393d3bec0636c960e289a Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 14 Feb 2018 11:14:23 -0500 Subject: drm/msm/dsi: fix direct caller of msm_gem_free_object() This should be using drm_gem_object_put(). Also, since this is done only in the driver unload path, we don't need to synchronize setting tx_gem_obj to NULL, so just use the _unlocked() variant. Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/dsi/dsi_host.c | 4 +--- drivers/gpu/drm/msm/msm_gem.c | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 62ac614eccf9..7a03a9489708 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1063,10 +1063,8 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host) if (msm_host->tx_gem_obj) { msm_gem_put_iova(msm_host->tx_gem_obj, 0); - mutex_lock(&dev->struct_mutex); - msm_gem_free_object(msm_host->tx_gem_obj); + drm_gem_object_put_unlocked(msm_host->tx_gem_obj); msm_host->tx_gem_obj = NULL; - mutex_unlock(&dev->struct_mutex); } if (msm_host->tx_buf) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 0e5073af3913..95196479f651 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -798,6 +798,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) } #endif +/* don't call directly! Use drm_gem_object_put() and friends */ void msm_gem_free_object(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; -- cgit v1.2.3 From 79d57bf6fa3bcc0ec5fc3b8140c4df1d696f593b Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Tue, 13 Feb 2018 22:46:58 -0800 Subject: drm/msm: Trigger fence completion from GPU Interrupt commands cause the CP to trigger an interrupt as the command is processed, regardless of whether the GPU has finished processing previous commands. On 8974 this is seen as the interrupt being delivered before the fence is written, and it is likely the reason for the additional CP_WAIT_FOR_IDLE workaround found for a306, which made the CP wait for the GPU to go idle before triggering the interrupt. Instead we can set the (undocumented) BIT(31) of CACHE_FLUSH_TS, which causes a special CACHE_FLUSH_TS interrupt to be triggered from the GPU as the write event is processed.
Add CACHE_FLUSH_TS to the IRQ masks of A3xx and A4xx and remove the workaround for A306. Suggested-by: Jordan Crouse Signed-off-by: Bjorn Andersson Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 1 + drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 1 + drivers/gpu/drm/msm/adreno/adreno_gpu.c | 18 ++---------------- 3 files changed, 4 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 1dd84d3489ae..3ebbeb3a9b68 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -35,6 +35,7 @@ A3XX_INT0_CP_RB_INT | \ A3XX_INT0_CP_REG_PROTECT_FAULT | \ A3XX_INT0_CP_AHB_ERROR_HALT | \ + A3XX_INT0_CACHE_FLUSH_TS | \ A3XX_INT0_UCHE_OOB_ACCESS) extern bool hang_debug; diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 2884b1b1660c..16d3d596638e 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -27,6 +27,7 @@ A4XX_INT0_CP_RB_INT | \ A4XX_INT0_CP_REG_PROTECT_FAULT | \ A4XX_INT0_CP_AHB_ERROR_HALT | \ + A4XX_INT0_CACHE_FLUSH_TS | \ A4XX_INT0_UCHE_OOB_ACCESS) extern bool hang_debug; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 87133c6c6f91..17d0506d058c 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -313,26 +313,12 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, OUT_RING(ring, 0x00000000); } + /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ OUT_PKT3(ring, CP_EVENT_WRITE, 3); - OUT_RING(ring, CACHE_FLUSH_TS); + OUT_RING(ring, CACHE_FLUSH_TS | BIT(31)); OUT_RING(ring, rbmemptr(ring, fence)); OUT_RING(ring, submit->seqno); - /* we could maybe be clever and only CP_COND_EXEC the interrupt: */ - OUT_PKT3(ring, CP_INTERRUPT, 1); - OUT_RING(ring, 0x80000000); - - /* Workaround for missing irq issue on 8x16/a306. Unsure if the - * root cause is a platform issue or some a306 quirk, but this - * keeps things humming along: - */ - if (adreno_is_a306(adreno_gpu)) { - OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); - OUT_RING(ring, 0x00000000); - OUT_PKT3(ring, CP_INTERRUPT, 1); - OUT_RING(ring, 0x80000000); - } - #if 0 if (adreno_is_a3xx(adreno_gpu)) { /* Dummy set-constant to trigger context rollover */ -- cgit v1.2.3 From f9cb8d8d836e155f361c3f1bbe0802ae1f98a17e Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 19 Feb 2018 08:17:06 -0500 Subject: drm/msm/mdp5: rework CTL START signal handling For DSI cmd-mode and writeback, we need to write the CTL's START register to kick things off, but we only want to do that once both the encoder and the crtc have had a chance to write their corresponding flush bits. The difficulty is that when there is a full modeset (i.e. the encoder state has changed) we want to defer the start until encoder->enable(). But if only planes have changed, we want to do this from crtc->commit(). The start_mask was a previous attempt to handle this, but it didn't really do the right thing since the atomic conversion. Instead, track in the crtc state that the start should be deferred, set to true from the encoder's (or in future writeback's) atomic_check(). This way the state is part of the atomic state, and rollback can work properly if an atomic test fails.
Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c | 4 +- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 6 ++- drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 52 +++++++----------------- drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h | 2 +- drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c | 5 ++- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h | 8 ++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 2 +- 7 files changed, 35 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c index 1abc7f5c345c..d6f79dc755b4 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c @@ -159,7 +159,7 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) pingpong_tearcheck_disable(encoder); mdp5_ctl_set_encoder_state(ctl, pipeline, false); - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); bs_set(mdp5_cmd_enc, 0); @@ -180,7 +180,7 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) if (pingpong_tearcheck_enable(encoder)) return; - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); mdp5_ctl_set_encoder_state(ctl, pipeline, true); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 8c5ed0b59e46..91c829a2cc85 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -97,9 +97,13 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); struct mdp5_ctl *ctl = mdp5_cstate->ctl; struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + bool start = !mdp5_cstate->defer_start; + + mdp5_cstate->defer_start = false; DBG("%s: flush=%08x", crtc->name, flush_mask); - return mdp5_ctl_commit(ctl, pipeline, flush_mask); + + return mdp5_ctl_commit(ctl, pipeline, flush_mask, start); } /* diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c index 439e0a300e25..1197f060c5c6 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c @@ -41,7 +41,9 @@ struct mdp5_ctl { u32 status; bool encoder_enabled; - uint32_t start_mask; + + /* pending flush_mask bits */ + u32 flush_mask; /* REG_MDP5_CTL_*() registers access info + lock: */ spinlock_t hw_lock; @@ -173,16 +175,8 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) { - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); struct mdp5_interface *intf = pipeline->intf; - struct mdp5_hw_mixer *mixer = pipeline->mixer; - struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; - - ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) | - mdp_ctl_flush_mask_encoder(intf); - if (r_mixer) - ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); /* Virtual interfaces need not set a display intf (e.g.: Writeback) */ if (!mdp5_cfg_intf_is_virtual(intf->type)) @@ -198,7 +192,7 @@ static bool start_signal_needed(struct mdp5_ctl *ctl, { struct mdp5_interface *intf = pipeline->intf; - if (!ctl->encoder_enabled || ctl->start_mask != 0) + if (!ctl->encoder_enabled) return false; switch (intf->type) { @@ -227,25 
+221,6 @@ static void send_start_signal(struct mdp5_ctl *ctl) spin_unlock_irqrestore(&ctl->hw_lock, flags); } -static void refill_start_mask(struct mdp5_ctl *ctl, - struct mdp5_pipeline *pipeline) -{ - struct mdp5_interface *intf = pipeline->intf; - struct mdp5_hw_mixer *mixer = pipeline->mixer; - struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; - - ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm); - if (r_mixer) - ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); - - /* - * Writeback encoder needs to program & flush - * address registers for each page flip.. - */ - if (intf->type == INTF_WB) - ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf); -} - /** * mdp5_ctl_set_encoder_state() - set the encoder state * @@ -268,7 +243,6 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, if (start_signal_needed(ctl, pipeline)) { send_start_signal(ctl); - refill_start_mask(ctl, pipeline); } return 0; @@ -557,17 +531,14 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, */ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, - u32 flush_mask) + u32 flush_mask, bool start) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; unsigned long flags; u32 flush_id = ctl->id; u32 curr_ctl_flush_mask; - ctl->start_mask &= ~flush_mask; - - VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask, - ctl->start_mask, ctl->pending_ctl_trigger); + VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger); if (ctl->pending_ctl_trigger & flush_mask) { flush_mask |= MDP5_CTL_FLUSH_CTL; @@ -582,6 +553,14 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, fix_for_single_flush(ctl, &flush_mask, &flush_id); + if (!start) { + ctl->flush_mask |= flush_mask; + return curr_ctl_flush_mask; + } else { + flush_mask |= ctl->flush_mask; + ctl->flush_mask = 0; + } + if (flush_mask) { spin_lock_irqsave(&ctl->hw_lock, flags); ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); @@ -590,7 +569,6 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, if (start_signal_needed(ctl, pipeline)) { send_start_signal(ctl); - refill_start_mask(ctl, pipeline); } return curr_ctl_flush_mask; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h index b63120388dc6..403b0db0fa4c 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h @@ -78,7 +78,7 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); /* @flush_mask: see CTL flush masks definitions below */ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, - u32 flush_mask); + u32 flush_mask, bool start); u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c index 36ad3cbe5f79..9af94e35f678 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c @@ -228,7 +228,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0); spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); /* * Wait for a vsync so we know the ENABLE=0 latched before @@ -262,7 +262,7 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, 
REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); mdp5_ctl_set_encoder_state(ctl, pipeline, true); @@ -319,6 +319,7 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder, mdp5_cstate->ctl = ctl; mdp5_cstate->pipeline.intf = intf; + mdp5_cstate->defer_start = true; return 0; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h index aeb94aa461b5..425a03d213e5 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h @@ -133,6 +133,14 @@ struct mdp5_crtc_state { u32 pp_done_irqmask; bool cmd_mode; + + /* should we not write CTL[n].START register on flush? If the + * encoder has changed this is set to true, since encoder->enable() + * is called after crtc state is committed, but we only want to + * write the CTL[n].START register once. This lets us defer + * writing CTL[n].START until encoder->enable() + */ + bool defer_start; }; #define to_mdp5_crtc_state(x) \ container_of(x, struct mdp5_crtc_state, base) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 98d4d7331767..5dc42d89b588 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -545,7 +545,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane, ctl = mdp5_crtc_get_ctl(new_state->crtc); - mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane)); + mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true); } *to_mdp5_plane_state(plane->state) = -- cgit v1.2.3 From 1af817909d092a35d002f7f2bdd2ef56d349e6bb Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 19 Feb 2018 08:27:13 -0500 Subject: drm/msm/mdp5: print a bit more of the atomic state Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 91c829a2cc85..9893e43ba6c5 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -951,12 +951,17 @@ mdp5_crtc_atomic_print_state(struct drm_printer *p, if (WARN_ON(!pipeline)) return; + if (mdp5_cstate->ctl) + drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl)); + drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ? pipeline->mixer->name : "(null)"); if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT) drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ? pipeline->r_mixer->name : "(null)"); + + drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode); } static void mdp5_crtc_reset(struct drm_crtc *crtc) -- cgit v1.2.3 From 583c13fd77c7db57add9de84a5e43fbf2be42168 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 19 Feb 2018 08:29:33 -0500 Subject: drm/msm/mdp5: add missing LM flush bits For some reason, layer-mixer 3 and 4 were missing. LM3 is used for writeback on 8x16. 
Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c index 1197f060c5c6..6b605562e572 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c @@ -468,6 +468,8 @@ u32 mdp_ctl_flush_mask_lm(int lm) case 0: return MDP5_CTL_FLUSH_LM0; case 1: return MDP5_CTL_FLUSH_LM1; case 2: return MDP5_CTL_FLUSH_LM2; + case 3: return MDP5_CTL_FLUSH_LM3; + case 4: return MDP5_CTL_FLUSH_LM4; case 5: return MDP5_CTL_FLUSH_LM5; default: return 0; } -- cgit v1.2.3 From 61b734cb7cac6b06d0b33635622b51066e8da32f Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 19 Feb 2018 08:31:29 -0500 Subject: drm/msm/mdp5: don't pre-reserve LM's if no dual-dsi If there is only a single DSI interface, don't reserve the first two layer-mixers for the dual-DSI use-case. This was causing problems for WB on 8x16, which has only two LMs and a single DSI, since an LM could not be assigned for WB. Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c index 6b605562e572..f93d5681267c 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c @@ -691,6 +691,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, struct mdp5_ctl_manager *ctl_mgr; const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd); int rev = mdp5_cfg_get_hw_rev(cfg_hnd); + unsigned dsi_cnt = 0; const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; unsigned long flags; int c, ret; @@ -740,7 +741,10 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, * only write into CTL0's FLUSH register) to keep two DSI pipes in sync. * Single FLUSH is supported from hw rev v3.0. */ - if (rev >= 3) { + for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++) + if (hw_cfg->intf.connect[c] == INTF_DSI) + dsi_cnt++; + if ((rev >= 3) && (dsi_cnt > 1)) { ctl_mgr->single_flush_supported = true; /* Reserve CTL0/1 for INTF1/2 */ ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED; -- cgit v1.2.3 From 288e5c8898c488298c39ff4bbf58928d30fbf99f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 26 Feb 2018 10:49:26 +0100 Subject: drm/msm: fix building without debugfs The adreno driver stopped building when CONFIG_DEBUG_FS is disabled: drivers/gpu/drm/msm/adreno/adreno_device.c: In function 'adreno_load_gpu': drivers/gpu/drm/msm/adreno/adreno_device.c:153:16: error: 'const struct msm_gpu_funcs' has no member named 'debugfs_init' if (gpu->funcs->debugfs_init) { ^~ drivers/gpu/drm/msm/adreno/adreno_device.c:154:13: error: 'const struct msm_gpu_funcs' has no member named 'debugfs_init' gpu->funcs->debugfs_init(gpu, dev->primary); ^~ This adds an #ifdef around the code that references the hidden pointer.
Fixes: 331dc0bc195b ("drm/msm: add a5xx specific debugfs") Signed-off-by: Arnd Bergmann Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/adreno_device.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index f07d3ec7d77b..8e0cb161754b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -164,11 +164,13 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) return NULL; } +#ifdef CONFIG_DEBUG_FS if (gpu->funcs->debugfs_init) { gpu->funcs->debugfs_init(gpu, dev->primary); gpu->funcs->debugfs_init(gpu, dev->render); gpu->funcs->debugfs_init(gpu, dev->control); } +#endif return gpu; } -- cgit v1.2.3