32 files changed, 1259 insertions, 613 deletions
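The MediaTek half of this series routes every display-component register access through new mtk_ddp_write*() helpers (added in mtk_drm_ddp_comp.c below): when a cmdq_pkt is supplied the write is appended to a GCE command packet, otherwise it falls back to a direct CPU MMIO write. The following is a minimal illustrative sketch of that dispatch pattern, not the kernel code; the struct layout, register offsets, base address and subsys number are made-up stand-ins.

/*
 * Sketch of the cmdq_pkt-or-MMIO dispatch used by the mtk_ddp_write*()
 * helpers in this series.  All types and values are simplified stand-ins,
 * not the real kernel structures.
 */
#include <stdint.h>
#include <stdio.h>

struct cmdq_pkt { int unused; };         /* stand-in for a GCE command packet */

struct ddp_comp {
        volatile uint32_t *regs;         /* CPU mapping of the component registers */
        uint64_t regs_pa;                /* physical base address, used by the GCE */
        uint8_t subsys;                  /* GCE subsystem id of the component */
};

/* Stand-in for cmdq_pkt_write(): record the write for later execution by the GCE. */
static void pkt_queue_write(struct cmdq_pkt *pkt, uint8_t subsys,
                            uint64_t pa, uint32_t value)
{
        (void)pkt;
        printf("queued: subsys %u, pa 0x%08llx <- 0x%08x\n",
               (unsigned)subsys, (unsigned long long)pa, value);
}

static void ddp_write(struct cmdq_pkt *pkt, uint32_t value,
                      struct ddp_comp *comp, uint32_t offset)
{
        if (pkt)                         /* batched path: executed later by the GCE */
                pkt_queue_write(pkt, comp->subsys, comp->regs_pa + offset, value);
        else                             /* immediate path: CPU writes the register */
                comp->regs[offset / 4] = value;
}

int main(void)
{
        static uint32_t fake_regs[64];   /* pretend MMIO window */
        struct ddp_comp comp = { .regs = fake_regs, .regs_pa = 0x14008000, .subsys = 20 };
        struct cmdq_pkt pkt = { 0 };

        ddp_write(NULL, 0x1, &comp, 0x30);   /* CPU path, as when shadow registers are used */
        ddp_write(&pkt, 0x1, &comp, 0x30);   /* packet path, flushed via cmdq_pkt_flush_async() */
        return 0;
}

In the patch itself, mtk_drm_crtc_hw_config() builds one such packet per commit, has it wait on the CRTC's GCE event, and flushes it asynchronously, so a frame's register updates are applied by the GCE rather than the CPU.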
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 5044dfb8e3d6..b7a82ed5788f 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
 mediatek-drm-hdmi-objs := mtk_cec.o \
                           mtk_hdmi.o \
                           mtk_hdmi_ddc.o \
-                          mtk_mt2701_hdmi_phy.o \
+                          mtk_mt2701_hdmi_phy.o \
                           mtk_mt8173_hdmi_phy.o \
                           mtk_hdmi_phy.o
 
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 59de2a46aa49..6fb0d6983a4a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
 
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp_comp.h"
@@ -45,12 +46,12 @@ static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
 
 static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
                              unsigned int h, unsigned int vrefresh,
-                             unsigned int bpc)
+                             unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
         struct mtk_disp_color *color = comp_to_color(comp);
 
-        writel(w, comp->regs + DISP_COLOR_WIDTH(color));
-        writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
+        mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color));
+        mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color));
 }
 
 static void mtk_color_start(struct mtk_ddp_comp *comp)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 4a55bb6e2213..891d80c73e04 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -11,6 +11,7 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
 
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp_comp.h"
@@ -124,14 +125,15 @@ static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
 
 static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
                            unsigned int h, unsigned int vrefresh,
-                           unsigned int bpc)
+                           unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
         if (w != 0 && h != 0)
-                writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE);
-        writel_relaxed(0x0, comp->regs + DISP_REG_OVL_ROI_BGCLR);
+                mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp,
+                                      DISP_REG_OVL_ROI_SIZE);
+        mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR);
 
-        writel(0x1, comp->regs + DISP_REG_OVL_RST);
-        writel(0x0, comp->regs + DISP_REG_OVL_RST);
+        mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST);
+        mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST);
 }
 
 static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
@@ -175,16 +177,16 @@ static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
         return 0;
 }
 
-static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
+static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
+                             struct cmdq_pkt *cmdq_pkt)
 {
-        unsigned int reg;
         unsigned int gmc_thrshd_l;
         unsigned int gmc_thrshd_h;
         unsigned int gmc_value;
         struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
 
-        writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
-
+        mtk_ddp_write(cmdq_pkt, 0x1, comp,
+                      DISP_REG_OVL_RDMA_CTRL(idx));
         gmc_thrshd_l = GMC_THRESHOLD_LOW >>
                       (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
         gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
@@ -194,22 +196,19 @@ static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
         else
                 gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
                             gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
-        writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
-
-        reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
-        reg = reg | BIT(idx);
-        writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
+        mtk_ddp_write(cmdq_pkt, gmc_value,
+                      comp, DISP_REG_OVL_RDMA_GMC(idx));
+        mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp,
+                           DISP_REG_OVL_SRC_CON, BIT(idx));
 }
 
-static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
+static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx,
+                              struct cmdq_pkt *cmdq_pkt)
 {
-        unsigned int reg;
-
-        reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
-        reg = reg & ~BIT(idx);
-        writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
-
-        writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
+        mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+                           DISP_REG_OVL_SRC_CON, BIT(idx));
+        mtk_ddp_write(cmdq_pkt, 0, comp,
+                      DISP_REG_OVL_RDMA_CTRL(idx));
 }
 
 static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
@@ -249,7 +248,8 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
 }
 
 static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
-                                 struct mtk_plane_state *state)
+                                 struct mtk_plane_state *state,
+                                 struct cmdq_pkt *cmdq_pkt)
 {
         struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
         struct mtk_plane_pending_state *pending = &state->pending;
@@ -260,11 +260,13 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
         unsigned int src_size = (pending->height << 16) | pending->width;
         unsigned int con;
 
-        if (!pending->enable)
-                mtk_ovl_layer_off(comp, idx);
+        if (!pending->enable) {
+                mtk_ovl_layer_off(comp, idx, cmdq_pkt);
+                return;
+        }
 
         con = ovl_fmt_convert(ovl, fmt);
-        if (idx != 0)
+        if (state->base.fb->format->has_alpha)
                 con |= OVL_CON_AEN | OVL_CON_ALPHA;
 
         if (pending->rotation & DRM_MODE_REFLECT_Y) {
@@ -277,14 +279,18 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
                 addr += pending->pitch - 1;
         }
 
-        writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx));
-        writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
-        writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
-        writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
-        writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx));
-
-        if (pending->enable)
-                mtk_ovl_layer_on(comp, idx);
+        mtk_ddp_write_relaxed(cmdq_pkt, con, comp,
+                              DISP_REG_OVL_CON(idx));
+        mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp,
+                              DISP_REG_OVL_PITCH(idx));
+        mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp,
+                              DISP_REG_OVL_SRC_SIZE(idx));
+        mtk_ddp_write_relaxed(cmdq_pkt, offset, comp,
+                              DISP_REG_OVL_OFFSET(idx));
+        mtk_ddp_write_relaxed(cmdq_pkt, addr, comp,
+                              DISP_REG_OVL_ADDR(ovl, idx));
+
+        mtk_ovl_layer_on(comp, idx, cmdq_pkt);
 }
 
 static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
@@ -313,8 +319,6 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
         .disable_vblank = mtk_ovl_disable_vblank,
         .supported_rotations = mtk_ovl_supported_rotations,
         .layer_nr = mtk_ovl_layer_nr,
-        .layer_on = mtk_ovl_layer_on,
-        .layer_off = mtk_ovl_layer_off,
         .layer_check = mtk_ovl_layer_check,
         .layer_config = mtk_ovl_layer_config,
         .bgclr_in_on = mtk_ovl_bgclr_in_on,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 405afef31407..0cb848d64206 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include
<linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk-cmdq.h> #include "mtk_drm_crtc.h" #include "mtk_drm_ddp_comp.h" @@ -125,14 +126,16 @@ static void mtk_rdma_stop(struct mtk_ddp_comp *comp) static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, unsigned int height, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { unsigned int threshold; unsigned int reg; struct mtk_disp_rdma *rdma = comp_to_rdma(comp); - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width); - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height); + mtk_ddp_write_mask(cmdq_pkt, width, comp, + DISP_REG_RDMA_SIZE_CON_0, 0xfff); + mtk_ddp_write_mask(cmdq_pkt, height, comp, + DISP_REG_RDMA_SIZE_CON_1, 0xfffff); /* * Enable FIFO underflow since DSI and DPI can't be blocked. @@ -144,7 +147,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, reg = RDMA_FIFO_UNDERFLOW_EN | RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) | RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold); - writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); + mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON); } static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, @@ -190,7 +193,8 @@ static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) } static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state) + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) { struct mtk_disp_rdma *rdma = comp_to_rdma(comp); struct mtk_plane_pending_state *pending = &state->pending; @@ -200,24 +204,27 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, unsigned int con; con = rdma_fmt_convert(rdma, fmt); - writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); + mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON); if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, - RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, - RDMA_MATRIX_INT_MTX_SEL, - RDMA_MATRIX_INT_MTX_BT601_to_RGB); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp, + DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB, + comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_INT_MTX_SEL); } else { - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, - RDMA_MATRIX_ENABLE, 0); + mtk_ddp_write_mask(cmdq_pkt, 0, comp, + DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE); } + mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR); + mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH); + mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp, + DISP_RDMA_MEM_GMC_SETTING_0); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp, + DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY); - writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); - writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); - writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); - rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, - RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); } static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index f80a8ba75977..0dfcd1787e65 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -5,6 +5,7 @@ #include <linux/clk.h> #include 
<linux/pm_runtime.h> +#include <linux/soc/mediatek/mtk-cmdq.h> #include <asm/barrier.h> #include <soc/mediatek/smi.h> @@ -42,11 +43,20 @@ struct mtk_drm_crtc { struct drm_plane *planes; unsigned int layer_nr; bool pending_planes; + bool pending_async_planes; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_client *cmdq_client; + u32 cmdq_event; +#endif void __iomem *config_regs; struct mtk_disp_mutex *mutex; unsigned int ddp_comp_nr; struct mtk_ddp_comp **ddp_comp; + + /* lock for display hardware access */ + struct mutex hw_lock; }; struct mtk_crtc_state { @@ -215,11 +225,12 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *comp; int i, count = 0; + unsigned int local_index = plane - mtk_crtc->planes; for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { comp = mtk_crtc->ddp_comp[i]; - if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) { - *local_layer = plane->index - count; + if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) { + *local_layer = local_index - count; return comp; } count += mtk_ddp_comp_layer_nr(comp); @@ -229,6 +240,13 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, return NULL; } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +static void ddp_cmdq_cb(struct cmdq_cb_data data) +{ + cmdq_pkt_destroy(data.data); +} +#endif + static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) { struct drm_crtc *crtc = &mtk_crtc->base; @@ -297,7 +315,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) if (i == 1) mtk_ddp_comp_bgclr_in_on(comp); - mtk_ddp_comp_config(comp, width, height, vrefresh, bpc); + mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL); mtk_ddp_comp_start(comp); } @@ -310,7 +328,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) plane_state = to_mtk_plane_state(plane->state); comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - mtk_ddp_comp_layer_config(comp, local_layer, plane_state); + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, NULL); } return 0; @@ -325,6 +345,7 @@ err_pm_runtime_put: static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) { struct drm_device *drm = mtk_crtc->base.dev; + struct drm_crtc *crtc = &mtk_crtc->base; int i; DRM_DEBUG_DRIVER("%s\n", __func__); @@ -350,9 +371,17 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) mtk_disp_mutex_unprepare(mtk_crtc->mutex); pm_runtime_put(drm->dev); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&crtc->dev->event_lock); + } } -static void mtk_crtc_ddp_config(struct drm_crtc *crtc) +static void mtk_crtc_ddp_config(struct drm_crtc *crtc, + struct cmdq_pkt *cmdq_handle) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); @@ -368,7 +397,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) if (state->pending_config) { mtk_ddp_comp_config(comp, state->pending_width, state->pending_height, - state->pending_vrefresh, 0); + state->pending_vrefresh, 0, + cmdq_handle); state->pending_config = false; } @@ -386,12 +416,84 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - mtk_ddp_comp_layer_config(comp, local_layer, - plane_state); + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + 
plane_state, + cmdq_handle); plane_state->pending.config = false; } mtk_crtc->pending_planes = false; } + + if (mtk_crtc->pending_async_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + if (!plane_state->pending.async_config) + continue; + + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, + &local_layer); + + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, + cmdq_handle); + plane_state->pending.async_config = false; + } + mtk_crtc->pending_async_planes = false; + } +} + +static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_pkt *cmdq_handle; +#endif + struct drm_crtc *crtc = &mtk_crtc->base; + struct mtk_drm_private *priv = crtc->dev->dev_private; + unsigned int pending_planes = 0, pending_async_planes = 0; + int i; + + mutex_lock(&mtk_crtc->hw_lock); + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + if (plane_state->pending.dirty) { + plane_state->pending.config = true; + plane_state->pending.dirty = false; + pending_planes |= BIT(i); + } else if (plane_state->pending.async_dirty) { + plane_state->pending.async_config = true; + plane_state->pending.async_dirty = false; + pending_async_planes |= BIT(i); + } + } + if (pending_planes) + mtk_crtc->pending_planes = true; + if (pending_async_planes) + mtk_crtc->pending_async_planes = true; + + if (priv->data->shadow_register) { + mtk_disp_mutex_acquire(mtk_crtc->mutex); + mtk_crtc_ddp_config(crtc, NULL); + mtk_disp_mutex_release(mtk_crtc->mutex); + } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (mtk_crtc->cmdq_client) { + cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE); + cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); + cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event); + mtk_crtc_ddp_config(crtc, cmdq_handle); + cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); + } +#endif + mutex_unlock(&mtk_crtc->hw_lock); } int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, @@ -401,7 +503,23 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_ddp_comp *comp; comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - return mtk_ddp_comp_layer_check(comp, local_layer, state); + if (comp) + return mtk_ddp_comp_layer_check(comp, local_layer, state); + return 0; +} + +void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); + const struct drm_plane_helper_funcs *plane_helper_funcs = + plane->helper_private; + + if (!mtk_crtc->enabled) + return; + + plane_helper_funcs->atomic_update(plane, new_state); + mtk_drm_crtc_hw_config(mtk_crtc); } static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, @@ -451,6 +569,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, } mtk_crtc->pending_planes = true; + mtk_drm_crtc_hw_config(mtk_crtc); /* Wait for planes to be disabled */ drm_crtc_wait_one_vblank(crtc); @@ -482,34 +601,16 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_drm_private *priv = crtc->dev->dev_private; - unsigned int pending_planes = 0; int 
i; if (mtk_crtc->event) mtk_crtc->pending_needs_vblank = true; - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - if (plane_state->pending.dirty) { - plane_state->pending.config = true; - plane_state->pending.dirty = false; - pending_planes |= BIT(i); - } - } - if (pending_planes) - mtk_crtc->pending_planes = true; if (crtc->state->color_mgmt_changed) - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state); - - if (priv->data->shadow_register) { - mtk_disp_mutex_acquire(mtk_crtc->mutex); - mtk_crtc_ddp_config(crtc); - mtk_disp_mutex_release(mtk_crtc->mutex); - } + mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state); + } + mtk_drm_crtc_hw_config(mtk_crtc); } static const struct drm_crtc_funcs mtk_crtc_funcs = { @@ -559,8 +660,12 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_drm_private *priv = crtc->dev->dev_private; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (!priv->data->shadow_register && !mtk_crtc->cmdq_client) +#else if (!priv->data->shadow_register) - mtk_crtc_ddp_config(crtc); +#endif + mtk_crtc_ddp_config(crtc, NULL); mtk_drm_finish_page_flip(mtk_crtc); } @@ -627,6 +732,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, int pipe = priv->num_pipes; int ret; int i; + bool has_ctm = false; + uint gamma_lut_size = 0; if (!path) return 0; @@ -677,6 +784,14 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, } mtk_crtc->ddp_comp[i] = comp; + + if (comp->funcs) { + if (comp->funcs->gamma_set) + gamma_lut_size = MTK_LUT_SIZE; + + if (comp->funcs->ctm_set) + has_ctm = true; + } } for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) @@ -697,9 +812,28 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, NULL, pipe); if (ret < 0) return ret; - drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); - drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE); - priv->num_pipes++; + if (gamma_lut_size) + drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); + drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size); + priv->num_pipes++; + mutex_init(&mtk_crtc->hw_lock); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + mtk_crtc->cmdq_client = + cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base), + 2000); + if (IS_ERR(mtk_crtc->cmdq_client)) { + dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", + drm_crtc_index(&mtk_crtc->base)); + mtk_crtc->cmdq_client = NULL; + } + ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events", + drm_crtc_index(&mtk_crtc->base), + &mtk_crtc->cmdq_event); + if (ret) + dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", + drm_crtc_index(&mtk_crtc->base)); +#endif return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 6afe1c19557a..a2b4677a451c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -21,5 +21,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, unsigned int path_len); int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_plane_state *state); +void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_plane_state *plane_state); #endif /* MTK_DRM_CRTC_H */ diff --git 
a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 7f21307cda75..1f5a112bb034 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -12,7 +12,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> - +#include <linux/soc/mediatek/mtk-cmdq.h> #include "mtk_drm_drv.h" #include "mtk_drm_plane.h" #include "mtk_drm_ddp_comp.h" @@ -37,7 +37,15 @@ #define CCORR_EN BIT(0) #define DISP_CCORR_CFG 0x0020 #define CCORR_RELAY_MODE BIT(0) +#define CCORR_ENGINE_EN BIT(1) +#define CCORR_GAMMA_OFF BIT(2) +#define CCORR_WGAMUT_SRC_CLIP BIT(3) #define DISP_CCORR_SIZE 0x0030 +#define DISP_CCORR_COEF_0 0x0080 +#define DISP_CCORR_COEF_1 0x0084 +#define DISP_CCORR_COEF_2 0x0088 +#define DISP_CCORR_COEF_3 0x008C +#define DISP_CCORR_COEF_4 0x0090 #define DISP_DITHER_EN 0x0000 #define DITHER_EN BIT(0) @@ -76,36 +84,84 @@ #define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) #define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) +void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) + cmdq_pkt_write(cmdq_pkt, comp->subsys, + comp->regs_pa + offset, value); + else +#endif + writel(value, comp->regs + offset); +} + +void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, + unsigned int offset) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) + cmdq_pkt_write(cmdq_pkt, comp->subsys, + comp->regs_pa + offset, value); + else +#endif + writel_relaxed(value, comp->regs + offset); +} + +void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, + unsigned int value, + struct mtk_ddp_comp *comp, + unsigned int offset, + unsigned int mask) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) { + cmdq_pkt_write_mask(cmdq_pkt, comp->subsys, + comp->regs_pa + offset, value, mask); + } else { +#endif + u32 tmp = readl(comp->regs + offset); + + tmp = (tmp & ~mask) | (value & mask); + writel(tmp, comp->regs + offset); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + } +#endif +} + void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, - unsigned int CFG) + unsigned int CFG, struct cmdq_pkt *cmdq_pkt) { /* If bpc equal to 0, the dithering function didn't be enabled */ if (bpc == 0) return; if (bpc >= MTK_MIN_BPC) { - writel(0, comp->regs + DISP_DITHER_5); - writel(0, comp->regs + DISP_DITHER_7); - writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | - DITHER_NEW_BIT_MODE, - comp->regs + DISP_DITHER_15); - writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | - DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), - comp->regs + DISP_DITHER_16); - writel(DISP_DITHERING, comp->regs + CFG); + mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5); + mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7); + mtk_ddp_write(cmdq_pkt, + DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_NEW_BIT_MODE, + comp, DISP_DITHER_15); + mtk_ddp_write(cmdq_pkt, + DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), + comp, DISP_DITHER_16); + mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG); } } static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, 
- unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(w << 16 | h, comp->regs + DISP_OD_SIZE); - writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG); - mtk_dither_set(comp, bpc, DISP_OD_CFG); + mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE); + mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG); + mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt); } static void mtk_od_start(struct mtk_ddp_comp *comp) @@ -120,9 +176,9 @@ static void mtk_ufoe_start(struct mtk_ddp_comp *comp) static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_AAL_SIZE); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE); } static void mtk_aal_start(struct mtk_ddp_comp *comp) @@ -137,10 +193,10 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp) static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE); - writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE); + mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG); } static void mtk_ccorr_start(struct mtk_ddp_comp *comp) @@ -153,12 +209,63 @@ static void mtk_ccorr_stop(struct mtk_ddp_comp *comp) writel_relaxed(0x0, comp->regs + DISP_CCORR_EN); } +/* Converts a DRM S31.32 value to the HW S1.10 format. */ +static u16 mtk_ctm_s31_32_to_s1_10(u64 in) +{ + u16 r; + + /* Sign bit. */ + r = in & BIT_ULL(63) ? BIT(11) : 0; + + if ((in & GENMASK_ULL(62, 33)) > 0) { + /* identity value 0x100000000 -> 0x400, */ + /* if bigger this, set it to max 0x7ff. */ + r |= GENMASK(10, 0); + } else { + /* take the 11 most important bits. 
*/ + r |= (in >> 22) & GENMASK(10, 0); + } + + return r; +} + +static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + struct drm_property_blob *blob = state->ctm; + struct drm_color_ctm *ctm; + const u64 *input; + uint16_t coeffs[9] = { 0 }; + int i; + struct cmdq_pkt *cmdq_pkt = NULL; + + if (!blob) + return; + + ctm = (struct drm_color_ctm *)blob->data; + input = ctm->matrix; + + for (i = 0; i < ARRAY_SIZE(coeffs); i++) + coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]); + + mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1], + comp, DISP_CCORR_COEF_0); + mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3], + comp, DISP_CCORR_COEF_1); + mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5], + comp, DISP_CCORR_COEF_2); + mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7], + comp, DISP_CCORR_COEF_3); + mtk_ddp_write(cmdq_pkt, coeffs[8] << 16, + comp, DISP_CCORR_COEF_4); +} + static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE); - writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE); + mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG); } static void mtk_dither_start(struct mtk_ddp_comp *comp) @@ -173,10 +280,10 @@ static void mtk_dither_stop(struct mtk_ddp_comp *comp) static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE); - mtk_dither_set(comp, bpc, DISP_GAMMA_CFG); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_GAMMA_SIZE); + mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt); } static void mtk_gamma_start(struct mtk_ddp_comp *comp) @@ -223,6 +330,7 @@ static const struct mtk_ddp_comp_funcs ddp_ccorr = { .config = mtk_ccorr_config, .start = mtk_ccorr_start, .stop = mtk_ccorr_stop, + .ctm_set = mtk_ccorr_ctm_set, }; static const struct mtk_ddp_comp_funcs ddp_dither = { @@ -326,6 +434,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, enum mtk_ddp_comp_type type; struct device_node *larb_node; struct platform_device *larb_pdev; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct resource res; + struct cmdq_client_reg cmdq_reg; + int ret; +#endif if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX) return -EINVAL; @@ -379,6 +492,19 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, comp->larb_dev = &larb_pdev->dev; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (of_address_to_resource(node, 0, &res) != 0) { + dev_err(dev, "Missing reg in %s node\n", node->full_name); + return -EINVAL; + } + comp->regs_pa = res.start; + + ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); + else + comp->subsys = cmdq_reg.subsys; +#endif return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 2f1e9e75b8da..debe36395fe7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -69,27 +69,29 @@ enum mtk_ddp_comp_id { }; struct mtk_ddp_comp; - +struct cmdq_pkt; struct mtk_ddp_comp_funcs { void (*config)(struct mtk_ddp_comp *comp, unsigned int w, - unsigned int h, unsigned int vrefresh, unsigned int bpc); + unsigned int h, unsigned int 
vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); void (*start)(struct mtk_ddp_comp *comp); void (*stop)(struct mtk_ddp_comp *comp); void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); void (*disable_vblank)(struct mtk_ddp_comp *comp); unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp); unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); - void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); - void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); int (*layer_check)(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state); void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state); + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt); void (*gamma_set)(struct mtk_ddp_comp *comp, struct drm_crtc_state *state); void (*bgclr_in_on)(struct mtk_ddp_comp *comp); void (*bgclr_in_off)(struct mtk_ddp_comp *comp); + void (*ctm_set)(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state); }; struct mtk_ddp_comp { @@ -99,14 +101,17 @@ struct mtk_ddp_comp { struct device *larb_dev; enum mtk_ddp_comp_id id; const struct mtk_ddp_comp_funcs *funcs; + resource_size_t regs_pa; + u8 subsys; }; static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, - unsigned int vrefresh, unsigned int bpc) + unsigned int vrefresh, unsigned int bpc, + struct cmdq_pkt *cmdq_pkt) { if (comp->funcs && comp->funcs->config) - comp->funcs->config(comp, w, h, vrefresh, bpc); + comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt); } static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp) @@ -151,20 +156,6 @@ static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) return 0; } -static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, - unsigned int idx) -{ - if (comp->funcs && comp->funcs->layer_on) - comp->funcs->layer_on(comp, idx); -} - -static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp, - unsigned int idx) -{ - if (comp->funcs && comp->funcs->layer_off) - comp->funcs->layer_off(comp, idx); -} - static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state) @@ -176,10 +167,11 @@ static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state) + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) { if (comp->funcs && comp->funcs->layer_config) - comp->funcs->layer_config(comp, idx, state); + comp->funcs->layer_config(comp, idx, state, cmdq_pkt); } static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp, @@ -201,6 +193,13 @@ static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp) comp->funcs->bgclr_in_off(comp); } +static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + if (comp->funcs && comp->funcs->ctm_set) + comp->funcs->ctm_set(comp, state); +} + int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type); int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, @@ -209,6 +208,13 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp); void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp); void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, - unsigned int CFG); - + 
unsigned int CFG, struct cmdq_pkt *cmdq_pkt); +enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id); +void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset); +void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset); +void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset, + unsigned int mask); #endif /* MTK_DRM_DDP_COMP_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 2b1c122066ea..0563c6813333 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -37,84 +37,9 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -static void mtk_atomic_schedule(struct mtk_drm_private *private, - struct drm_atomic_state *state) -{ - private->commit.state = state; - schedule_work(&private->commit.work); -} - -static void mtk_atomic_complete(struct mtk_drm_private *private, - struct drm_atomic_state *state) -{ - struct drm_device *drm = private->drm; - - drm_atomic_helper_wait_for_fences(drm, state, false); - - /* - * Mediatek drm supports runtime PM, so plane registers cannot be - * written when their crtc is disabled. - * - * The comment for drm_atomic_helper_commit states: - * For drivers supporting runtime PM the recommended sequence is - * - * drm_atomic_helper_commit_modeset_disables(dev, state); - * drm_atomic_helper_commit_modeset_enables(dev, state); - * drm_atomic_helper_commit_planes(dev, state, - * DRM_PLANE_COMMIT_ACTIVE_ONLY); - * - * See the kerneldoc entries for these three functions for more details. - */ - drm_atomic_helper_commit_modeset_disables(drm, state); - drm_atomic_helper_commit_modeset_enables(drm, state); - drm_atomic_helper_commit_planes(drm, state, - DRM_PLANE_COMMIT_ACTIVE_ONLY); - - drm_atomic_helper_wait_for_vblanks(drm, state); - - drm_atomic_helper_cleanup_planes(drm, state); - drm_atomic_state_put(state); -} - -static void mtk_atomic_work(struct work_struct *work) -{ - struct mtk_drm_private *private = container_of(work, - struct mtk_drm_private, commit.work); - - mtk_atomic_complete(private, private->commit.state); -} - -static int mtk_atomic_commit(struct drm_device *drm, - struct drm_atomic_state *state, - bool async) -{ - struct mtk_drm_private *private = drm->dev_private; - int ret; - - ret = drm_atomic_helper_prepare_planes(drm, state); - if (ret) - return ret; - - mutex_lock(&private->commit.lock); - flush_work(&private->commit.work); - - ret = drm_atomic_helper_swap_state(state, true); - if (ret) { - mutex_unlock(&private->commit.lock); - drm_atomic_helper_cleanup_planes(drm, state); - return ret; - } - - drm_atomic_state_get(state); - if (async) - mtk_atomic_schedule(private, state); - else - mtk_atomic_complete(private, state); - - mutex_unlock(&private->commit.lock); - - return 0; -} +static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; static struct drm_framebuffer * mtk_drm_mode_fb_create(struct drm_device *dev, @@ -132,7 +57,7 @@ mtk_drm_mode_fb_create(struct drm_device *dev, static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = { .fb_create = mtk_drm_mode_fb_create, .atomic_check = drm_atomic_helper_check, - .atomic_commit = mtk_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = 
{ @@ -250,6 +175,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) drm->mode_config.max_width = 4096; drm->mode_config.max_height = 4096; drm->mode_config.funcs = &mtk_drm_mode_config_funcs; + drm->mode_config.helper_private = &mtk_drm_mode_config_helpers; ret = component_bind_all(drm->dev, drm); if (ret) @@ -509,8 +435,6 @@ static int mtk_drm_probe(struct platform_device *pdev) if (!private) return -ENOMEM; - mutex_init(&private->commit.lock); - INIT_WORK(&private->commit.work, mtk_atomic_work); private->data = of_device_get_match_data(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index e03fea12ff59..17bc99b9f5d4 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -43,13 +43,6 @@ struct mtk_drm_private { struct device_node *comp_node[DDP_COMPONENT_ID_MAX]; struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX]; const struct mtk_mmsys_driver_data *data; - - struct { - struct drm_atomic_state *state; - struct work_struct work; - struct mutex lock; - } commit; - struct drm_atomic_state *suspend_state; bool dma_parms_allocated; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index f0b0325381e0..914cc7619cd7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -7,6 +7,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_atomic_uapi.h> #include <drm/drm_plane_helper.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -75,6 +76,50 @@ static void mtk_drm_plane_destroy_state(struct drm_plane *plane, kfree(to_mtk_plane_state(state)); } +static int mtk_plane_atomic_async_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state; + + if (plane != state->crtc->cursor) + return -EINVAL; + + if (!plane->state) + return -EINVAL; + + if (!plane->state->fb) + return -EINVAL; + + if (state->state) + crtc_state = drm_atomic_get_existing_crtc_state(state->state, + state->crtc); + else /* Special case for asynchronous cursor updates. 
*/ + crtc_state = state->crtc->state; + + return drm_atomic_helper_check_plane_state(plane->state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); +} + +static void mtk_plane_atomic_async_update(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct mtk_plane_state *state = to_mtk_plane_state(plane->state); + + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + plane->state->crtc_h = new_state->crtc_h; + plane->state->crtc_w = new_state->crtc_w; + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->src_h = new_state->src_h; + plane->state->src_w = new_state->src_w; + state->pending.async_dirty = true; + + mtk_drm_crtc_async_update(new_state->crtc, plane, new_state); +} + static const struct drm_plane_funcs mtk_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -163,6 +208,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { .atomic_check = mtk_plane_atomic_check, .atomic_update = mtk_plane_atomic_update, .atomic_disable = mtk_plane_atomic_disable, + .atomic_async_update = mtk_plane_atomic_async_update, + .atomic_async_check = mtk_plane_atomic_async_check, }; int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h index 760885e35b27..d454bece9535 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h @@ -22,6 +22,8 @@ struct mtk_plane_pending_state { unsigned int height; unsigned int rotation; bool dirty; + bool async_dirty; + bool async_config; }; struct mtk_plane_state { diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 714af052fbef..7c70fd31a4c2 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1727,6 +1727,7 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc, { struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; + int err; if (!tegra_dc_idle(dc)) { tegra_dc_stop(dc); @@ -1773,7 +1774,9 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc, spin_unlock_irq(&crtc->dev->event_lock); - pm_runtime_put_sync(dc->dev); + err = host1x_client_suspend(&dc->client); + if (err < 0) + dev_err(dc->dev, "failed to suspend: %d\n", err); } static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, @@ -1783,8 +1786,13 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, struct tegra_dc_state *state = to_dc_state(crtc->state); struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; + int err; - pm_runtime_get_sync(dc->dev); + err = host1x_client_resume(&dc->client); + if (err < 0) { + dev_err(dc->dev, "failed to resume: %d\n", err); + return; + } /* initialize display controller */ if (dc->syncpt) { @@ -1996,7 +2004,7 @@ static bool tegra_dc_has_window_groups(struct tegra_dc *dc) static int tegra_dc_init(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED; struct tegra_dc *dc = host1x_client_to_dc(client); struct tegra_drm *tegra = drm->dev_private; @@ -2012,6 +2020,15 @@ static int tegra_dc_init(struct host1x_client *client) if (!tegra_dc_has_window_groups(dc)) return 0; + /* + * Set the display hub as the host1x client parent for the display + * controller. 
This is needed for the runtime reference counting that + * ensures the display hub is always powered when any of the display + * controllers are. + */ + if (dc->soc->has_nvdisplay) + client->parent = &tegra->hub->client; + dc->syncpt = host1x_syncpt_request(client, flags); if (!dc->syncpt) dev_warn(dc->dev, "failed to allocate syncpoint\n"); @@ -2077,9 +2094,9 @@ static int tegra_dc_init(struct host1x_client *client) /* * Inherit the DMA parameters (such as maximum segment size) from the - * parent device. + * parent host1x device. */ - client->dev->dma_parms = client->parent->dma_parms; + client->dev->dma_parms = client->host->dma_parms; return 0; @@ -2121,9 +2138,74 @@ static int tegra_dc_exit(struct host1x_client *client) return 0; } +static int tegra_dc_runtime_suspend(struct host1x_client *client) +{ + struct tegra_dc *dc = host1x_client_to_dc(client); + struct device *dev = client->dev; + int err; + + err = reset_control_assert(dc->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + + if (dc->soc->has_powergate) + tegra_powergate_power_off(dc->powergate); + + clk_disable_unprepare(dc->clk); + pm_runtime_put_sync(dev); + + return 0; +} + +static int tegra_dc_runtime_resume(struct host1x_client *client) +{ + struct tegra_dc *dc = host1x_client_to_dc(client); + struct device *dev = client->dev; + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get runtime PM: %d\n", err); + return err; + } + + if (dc->soc->has_powergate) { + err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk, + dc->rst); + if (err < 0) { + dev_err(dev, "failed to power partition: %d\n", err); + goto put_rpm; + } + } else { + err = clk_prepare_enable(dc->clk); + if (err < 0) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto put_rpm; + } + + err = reset_control_deassert(dc->rst); + if (err < 0) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto disable_clk; + } + } + + return 0; + +disable_clk: + clk_disable_unprepare(dc->clk); +put_rpm: + pm_runtime_put_sync(dev); + return err; +} + static const struct host1x_client_ops dc_client_ops = { .init = tegra_dc_init, .exit = tegra_dc_exit, + .suspend = tegra_dc_runtime_suspend, + .resume = tegra_dc_runtime_resume, }; static const struct tegra_dc_soc_info tegra20_dc_soc_info = { @@ -2535,65 +2617,10 @@ static int tegra_dc_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int tegra_dc_suspend(struct device *dev) -{ - struct tegra_dc *dc = dev_get_drvdata(dev); - int err; - - err = reset_control_assert(dc->rst); - if (err < 0) { - dev_err(dev, "failed to assert reset: %d\n", err); - return err; - } - - if (dc->soc->has_powergate) - tegra_powergate_power_off(dc->powergate); - - clk_disable_unprepare(dc->clk); - - return 0; -} - -static int tegra_dc_resume(struct device *dev) -{ - struct tegra_dc *dc = dev_get_drvdata(dev); - int err; - - if (dc->soc->has_powergate) { - err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk, - dc->rst); - if (err < 0) { - dev_err(dev, "failed to power partition: %d\n", err); - return err; - } - } else { - err = clk_prepare_enable(dc->clk); - if (err < 0) { - dev_err(dev, "failed to enable clock: %d\n", err); - return err; - } - - err = reset_control_deassert(dc->rst); - if (err < 0) { - dev_err(dev, "failed to deassert reset: %d\n", err); - return err; - } - } - - return 0; -} -#endif - -static const struct dev_pm_ops tegra_dc_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL) 
-}; - struct platform_driver tegra_dc_driver = { .driver = { .name = "tegra-dc", .of_match_table = tegra_dc_of_match, - .pm = &tegra_dc_pm_ops, }, .probe = tegra_dc_probe, .remove = tegra_dc_remove, diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index 622cdf1ad246..7dfb50f65067 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -588,7 +588,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev) /* make sure pads are powered down when not in use */ tegra_dpaux_pad_power_down(dpaux); - pm_runtime_put(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); drm_dp_aux_unregister(&dpaux->aux); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index f455ce71e85d..aa9e49f04988 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -905,7 +905,7 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra, int host1x_client_iommu_attach(struct host1x_client *client) { struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev); - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); struct tegra_drm *tegra = drm->dev_private; struct iommu_group *group = NULL; int err; @@ -941,7 +941,7 @@ int host1x_client_iommu_attach(struct host1x_client *client) void host1x_client_iommu_detach(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); struct tegra_drm *tegra = drm->dev_private; struct iommu_domain *domain; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index d941553f7a3d..ed99b67deb29 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -144,6 +144,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output); void tegra_output_exit(struct tegra_output *output); void tegra_output_find_possible_crtcs(struct tegra_output *output, struct drm_device *drm); +int tegra_output_suspend(struct tegra_output *output); +int tegra_output_resume(struct tegra_output *output); int tegra_output_connector_get_modes(struct drm_connector *connector); enum drm_connector_status diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index a5d47e301c5f..88b9d64c77bf 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -840,7 +840,9 @@ static void tegra_dsi_unprepare(struct tegra_dsi *dsi) dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n", err); - pm_runtime_put(dsi->dev); + err = host1x_client_suspend(&dsi->client); + if (err < 0) + dev_err(dsi->dev, "failed to suspend: %d\n", err); } static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) @@ -882,11 +884,15 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) tegra_dsi_unprepare(dsi); } -static void tegra_dsi_prepare(struct tegra_dsi *dsi) +static int tegra_dsi_prepare(struct tegra_dsi *dsi) { int err; - pm_runtime_get_sync(dsi->dev); + err = host1x_client_resume(&dsi->client); + if (err < 0) { + dev_err(dsi->dev, "failed to resume: %d\n", err); + return err; + } err = tegra_mipi_enable(dsi->mipi); if (err < 0) @@ -899,6 +905,8 @@ static void tegra_dsi_prepare(struct tegra_dsi *dsi) if (dsi->slave) tegra_dsi_prepare(dsi->slave); + + return 0; } static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) @@ -909,8 +917,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) struct tegra_dsi *dsi = to_dsi(output); 
struct tegra_dsi_state *state; u32 value; + int err; - tegra_dsi_prepare(dsi); + err = tegra_dsi_prepare(dsi); + if (err < 0) { + dev_err(dsi->dev, "failed to prepare: %d\n", err); + return; + } state = tegra_dsi_get_state(dsi); @@ -1030,7 +1043,7 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = { static int tegra_dsi_init(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); struct tegra_dsi *dsi = host1x_client_to_dsi(client); int err; @@ -1075,9 +1088,89 @@ static int tegra_dsi_exit(struct host1x_client *client) return 0; } +static int tegra_dsi_runtime_suspend(struct host1x_client *client) +{ + struct tegra_dsi *dsi = host1x_client_to_dsi(client); + struct device *dev = client->dev; + int err; + + if (dsi->rst) { + err = reset_control_assert(dsi->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + } + + usleep_range(1000, 2000); + + clk_disable_unprepare(dsi->clk_lp); + clk_disable_unprepare(dsi->clk); + + regulator_disable(dsi->vdd); + pm_runtime_put_sync(dev); + + return 0; +} + +static int tegra_dsi_runtime_resume(struct host1x_client *client) +{ + struct tegra_dsi *dsi = host1x_client_to_dsi(client); + struct device *dev = client->dev; + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get runtime PM: %d\n", err); + return err; + } + + err = regulator_enable(dsi->vdd); + if (err < 0) { + dev_err(dev, "failed to enable VDD supply: %d\n", err); + goto put_rpm; + } + + err = clk_prepare_enable(dsi->clk); + if (err < 0) { + dev_err(dev, "cannot enable DSI clock: %d\n", err); + goto disable_vdd; + } + + err = clk_prepare_enable(dsi->clk_lp); + if (err < 0) { + dev_err(dev, "cannot enable low-power clock: %d\n", err); + goto disable_clk; + } + + usleep_range(1000, 2000); + + if (dsi->rst) { + err = reset_control_deassert(dsi->rst); + if (err < 0) { + dev_err(dev, "cannot assert reset: %d\n", err); + goto disable_clk_lp; + } + } + + return 0; + +disable_clk_lp: + clk_disable_unprepare(dsi->clk_lp); +disable_clk: + clk_disable_unprepare(dsi->clk); +disable_vdd: + regulator_disable(dsi->vdd); +put_rpm: + pm_runtime_put_sync(dev); + return err; +} + static const struct host1x_client_ops dsi_client_ops = { .init = tegra_dsi_init, .exit = tegra_dsi_exit, + .suspend = tegra_dsi_runtime_suspend, + .resume = tegra_dsi_runtime_resume, }; static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi) @@ -1596,79 +1689,6 @@ static int tegra_dsi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int tegra_dsi_suspend(struct device *dev) -{ - struct tegra_dsi *dsi = dev_get_drvdata(dev); - int err; - - if (dsi->rst) { - err = reset_control_assert(dsi->rst); - if (err < 0) { - dev_err(dev, "failed to assert reset: %d\n", err); - return err; - } - } - - usleep_range(1000, 2000); - - clk_disable_unprepare(dsi->clk_lp); - clk_disable_unprepare(dsi->clk); - - regulator_disable(dsi->vdd); - - return 0; -} - -static int tegra_dsi_resume(struct device *dev) -{ - struct tegra_dsi *dsi = dev_get_drvdata(dev); - int err; - - err = regulator_enable(dsi->vdd); - if (err < 0) { - dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err); - return err; - } - - err = clk_prepare_enable(dsi->clk); - if (err < 0) { - dev_err(dev, "cannot enable DSI clock: %d\n", err); - goto disable_vdd; - } - - err = clk_prepare_enable(dsi->clk_lp); - if (err < 0) { - dev_err(dev, "cannot enable low-power 
clock: %d\n", err); - goto disable_clk; - } - - usleep_range(1000, 2000); - - if (dsi->rst) { - err = reset_control_deassert(dsi->rst); - if (err < 0) { - dev_err(dev, "cannot assert reset: %d\n", err); - goto disable_clk_lp; - } - } - - return 0; - -disable_clk_lp: - clk_disable_unprepare(dsi->clk_lp); -disable_clk: - clk_disable_unprepare(dsi->clk); -disable_vdd: - regulator_disable(dsi->vdd); - return err; -} -#endif - -static const struct dev_pm_ops tegra_dsi_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL) -}; - static const struct of_device_id tegra_dsi_of_match[] = { { .compatible = "nvidia,tegra210-dsi", }, { .compatible = "nvidia,tegra132-dsi", }, @@ -1682,7 +1702,6 @@ struct platform_driver tegra_dsi_driver = { .driver = { .name = "tegra-dsi", .of_match_table = tegra_dsi_of_match, - .pm = &tegra_dsi_pm_ops, }, .probe = tegra_dsi_probe, .remove = tegra_dsi_remove, diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index 1fc4e56c7cc5..48363f744bb9 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -34,7 +34,7 @@ static inline struct gr2d *to_gr2d(struct tegra_drm_client *client) static int gr2d_init(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); unsigned long flags = HOST1X_SYNCPT_HAS_BASE; struct gr2d *gr2d = to_gr2d(drm); int err; @@ -76,7 +76,7 @@ put: static int gr2d_exit(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); struct tegra_drm *tegra = dev->dev_private; struct gr2d *gr2d = to_gr2d(drm); int err; diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 24fae0f64032..c0a528be0369 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -43,7 +43,7 @@ static inline struct gr3d *to_gr3d(struct tegra_drm_client *client) static int gr3d_init(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); unsigned long flags = HOST1X_SYNCPT_HAS_BASE; struct gr3d *gr3d = to_gr3d(drm); int err; @@ -85,7 +85,7 @@ put: static int gr3d_exit(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); struct gr3d *gr3d = to_gr3d(drm); int err; diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 21a629adcb51..6f117628f257 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -1146,6 +1146,7 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) struct tegra_dc *dc = to_tegra_dc(encoder->crtc); struct tegra_hdmi *hdmi = to_hdmi(output); u32 value; + int err; /* * The following accesses registers of the display controller, so make @@ -1171,7 +1172,9 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE); tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK); - pm_runtime_put(hdmi->dev); + err = host1x_client_suspend(&hdmi->client); + if (err < 0) + dev_err(hdmi->dev, "failed to suspend: %d\n", err); } static void 
tegra_hdmi_encoder_enable(struct drm_encoder *encoder) @@ -1186,7 +1189,11 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) u32 value; int err; - pm_runtime_get_sync(hdmi->dev); + err = host1x_client_resume(&hdmi->client); + if (err < 0) { + dev_err(hdmi->dev, "failed to resume: %d\n", err); + return; + } /* * Enable and unmask the HDA codec SCRATCH0 register interrupt. This @@ -1424,8 +1431,8 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = { static int tegra_hdmi_init(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); + struct drm_device *drm = dev_get_drvdata(client->host); int err; hdmi->output.dev = client->dev; @@ -1490,9 +1497,66 @@ static int tegra_hdmi_exit(struct host1x_client *client) return 0; } +static int tegra_hdmi_runtime_suspend(struct host1x_client *client) +{ + struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); + struct device *dev = client->dev; + int err; + + err = reset_control_assert(hdmi->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + + usleep_range(1000, 2000); + + clk_disable_unprepare(hdmi->clk); + pm_runtime_put_sync(dev); + + return 0; +} + +static int tegra_hdmi_runtime_resume(struct host1x_client *client) +{ + struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); + struct device *dev = client->dev; + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get runtime PM: %d\n", err); + return err; + } + + err = clk_prepare_enable(hdmi->clk); + if (err < 0) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto put_rpm; + } + + usleep_range(1000, 2000); + + err = reset_control_deassert(hdmi->rst); + if (err < 0) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto disable_clk; + } + + return 0; + +disable_clk: + clk_disable_unprepare(hdmi->clk); +put_rpm: + pm_runtime_put_sync(dev); + return err; +} + static const struct host1x_client_ops hdmi_client_ops = { .init = tegra_hdmi_init, .exit = tegra_hdmi_exit, + .suspend = tegra_hdmi_runtime_suspend, + .resume = tegra_hdmi_runtime_resume, }; static const struct tegra_hdmi_config tegra20_hdmi_config = { @@ -1700,58 +1764,10 @@ static int tegra_hdmi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int tegra_hdmi_suspend(struct device *dev) -{ - struct tegra_hdmi *hdmi = dev_get_drvdata(dev); - int err; - - err = reset_control_assert(hdmi->rst); - if (err < 0) { - dev_err(dev, "failed to assert reset: %d\n", err); - return err; - } - - usleep_range(1000, 2000); - - clk_disable_unprepare(hdmi->clk); - - return 0; -} - -static int tegra_hdmi_resume(struct device *dev) -{ - struct tegra_hdmi *hdmi = dev_get_drvdata(dev); - int err; - - err = clk_prepare_enable(hdmi->clk); - if (err < 0) { - dev_err(dev, "failed to enable clock: %d\n", err); - return err; - } - - usleep_range(1000, 2000); - - err = reset_control_deassert(hdmi->rst); - if (err < 0) { - dev_err(dev, "failed to deassert reset: %d\n", err); - clk_disable_unprepare(hdmi->clk); - return err; - } - - return 0; -} -#endif - -static const struct dev_pm_ops tegra_hdmi_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL) -}; - struct platform_driver tegra_hdmi_driver = { .driver = { .name = "tegra-hdmi", .of_match_table = tegra_hdmi_of_match, - .pm = &tegra_hdmi_pm_ops, }, .probe = tegra_hdmi_probe, .remove = tegra_hdmi_remove, diff --git 
a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 47d985ac7cd7..8183e617bf6b 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -95,17 +95,25 @@ static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value, static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp) { + int err = 0; + mutex_lock(&wgrp->lock); if (wgrp->usecount == 0) { - pm_runtime_get_sync(wgrp->parent); + err = host1x_client_resume(wgrp->parent); + if (err < 0) { + dev_err(wgrp->parent->dev, "failed to resume: %d\n", err); + goto unlock; + } + reset_control_deassert(wgrp->rst); } wgrp->usecount++; - mutex_unlock(&wgrp->lock); - return 0; +unlock: + mutex_unlock(&wgrp->lock); + return err; } static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp) @@ -121,7 +129,7 @@ static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp) wgrp->index); } - pm_runtime_put(wgrp->parent); + host1x_client_suspend(wgrp->parent); } wgrp->usecount--; @@ -379,6 +387,7 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, struct tegra_plane *p = to_tegra_plane(plane); struct tegra_dc *dc; u32 value; + int err; /* rien ne va plus */ if (!old_state || !old_state->crtc) @@ -386,6 +395,12 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, dc = to_tegra_dc(old_state->crtc); + err = host1x_client_resume(&dc->client); + if (err < 0) { + dev_err(dc->dev, "failed to resume: %d\n", err); + return; + } + /* * XXX Legacy helpers seem to sometimes call ->atomic_disable() even * on planes that are already disabled. Make sure we fallback to the @@ -394,15 +409,13 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, if (WARN_ON(p->dc == NULL)) p->dc = dc; - pm_runtime_get_sync(dc->dev); - value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS); value &= ~WIN_ENABLE; tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS); tegra_dc_remove_shared_plane(dc, p); - pm_runtime_put(dc->dev); + host1x_client_suspend(&dc->client); } static void tegra_shared_plane_atomic_update(struct drm_plane *plane, @@ -415,6 +428,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, struct tegra_plane *p = to_tegra_plane(plane); dma_addr_t base; u32 value; + int err; /* rien ne va plus */ if (!plane->state->crtc || !plane->state->fb) @@ -425,7 +439,11 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, return; } - pm_runtime_get_sync(dc->dev); + err = host1x_client_resume(&dc->client); + if (err < 0) { + dev_err(dc->dev, "failed to resume: %d\n", err); + return; + } tegra_dc_assign_shared_plane(dc, p); @@ -515,7 +533,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, value &= ~CONTROL_CSC_ENABLE; tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL); - pm_runtime_put(dc->dev); + host1x_client_suspend(&dc->client); } static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = { @@ -551,7 +569,7 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm, plane->base.index = index; plane->wgrp = &hub->wgrps[wgrp]; - plane->wgrp->parent = dc->dev; + plane->wgrp->parent = &dc->client; p = &plane->base.base; @@ -656,8 +674,13 @@ int tegra_display_hub_atomic_check(struct drm_device *drm, static void tegra_display_hub_update(struct tegra_dc *dc) { u32 value; + int err; - pm_runtime_get_sync(dc->dev); + err = host1x_client_resume(&dc->client); + if (err < 0) { + dev_err(dc->dev, "failed to resume: %d\n", err); + return; + } value = tegra_dc_readl(dc, 
DC_CMD_IHUB_COMMON_MISC_CTL); value &= ~LATENCY_EVENT; @@ -672,7 +695,7 @@ static void tegra_display_hub_update(struct tegra_dc *dc) tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL); tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); - pm_runtime_put(dc->dev); + host1x_client_suspend(&dc->client); } void tegra_display_hub_atomic_commit(struct drm_device *drm, @@ -705,7 +728,7 @@ void tegra_display_hub_atomic_commit(struct drm_device *drm, static int tegra_display_hub_init(struct host1x_client *client) { struct tegra_display_hub *hub = to_tegra_display_hub(client); - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); struct tegra_drm *tegra = drm->dev_private; struct tegra_display_hub_state *state; @@ -723,7 +746,7 @@ static int tegra_display_hub_init(struct host1x_client *client) static int tegra_display_hub_exit(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); struct tegra_drm *tegra = drm->dev_private; drm_atomic_private_obj_fini(&tegra->hub->base); @@ -732,9 +755,85 @@ static int tegra_display_hub_exit(struct host1x_client *client) return 0; } +static int tegra_display_hub_runtime_suspend(struct host1x_client *client) +{ + struct tegra_display_hub *hub = to_tegra_display_hub(client); + struct device *dev = client->dev; + unsigned int i = hub->num_heads; + int err; + + err = reset_control_assert(hub->rst); + if (err < 0) + return err; + + while (i--) + clk_disable_unprepare(hub->clk_heads[i]); + + clk_disable_unprepare(hub->clk_hub); + clk_disable_unprepare(hub->clk_dsc); + clk_disable_unprepare(hub->clk_disp); + + pm_runtime_put_sync(dev); + + return 0; +} + +static int tegra_display_hub_runtime_resume(struct host1x_client *client) +{ + struct tegra_display_hub *hub = to_tegra_display_hub(client); + struct device *dev = client->dev; + unsigned int i; + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get runtime PM: %d\n", err); + return err; + } + + err = clk_prepare_enable(hub->clk_disp); + if (err < 0) + goto put_rpm; + + err = clk_prepare_enable(hub->clk_dsc); + if (err < 0) + goto disable_disp; + + err = clk_prepare_enable(hub->clk_hub); + if (err < 0) + goto disable_dsc; + + for (i = 0; i < hub->num_heads; i++) { + err = clk_prepare_enable(hub->clk_heads[i]); + if (err < 0) + goto disable_heads; + } + + err = reset_control_deassert(hub->rst); + if (err < 0) + goto disable_heads; + + return 0; + +disable_heads: + while (i--) + clk_disable_unprepare(hub->clk_heads[i]); + + clk_disable_unprepare(hub->clk_hub); +disable_dsc: + clk_disable_unprepare(hub->clk_dsc); +disable_disp: + clk_disable_unprepare(hub->clk_disp); +put_rpm: + pm_runtime_put_sync(dev); + return err; +} + static const struct host1x_client_ops tegra_display_hub_ops = { .init = tegra_display_hub_init, .exit = tegra_display_hub_exit, + .suspend = tegra_display_hub_runtime_suspend, + .resume = tegra_display_hub_runtime_resume, }; static int tegra_display_hub_probe(struct platform_device *pdev) @@ -851,6 +950,7 @@ static int tegra_display_hub_probe(struct platform_device *pdev) static int tegra_display_hub_remove(struct platform_device *pdev) { struct tegra_display_hub *hub = platform_get_drvdata(pdev); + unsigned int i; int err; err = host1x_client_unregister(&hub->client); @@ -859,78 +959,17 @@ static int tegra_display_hub_remove(struct platform_device *pdev) err); } - pm_runtime_disable(&pdev->dev); - - return err; -} 
- -static int __maybe_unused tegra_display_hub_suspend(struct device *dev) -{ - struct tegra_display_hub *hub = dev_get_drvdata(dev); - unsigned int i = hub->num_heads; - int err; - - err = reset_control_assert(hub->rst); - if (err < 0) - return err; - - while (i--) - clk_disable_unprepare(hub->clk_heads[i]); - - clk_disable_unprepare(hub->clk_hub); - clk_disable_unprepare(hub->clk_dsc); - clk_disable_unprepare(hub->clk_disp); - - return 0; -} - -static int __maybe_unused tegra_display_hub_resume(struct device *dev) -{ - struct tegra_display_hub *hub = dev_get_drvdata(dev); - unsigned int i; - int err; - - err = clk_prepare_enable(hub->clk_disp); - if (err < 0) - return err; - - err = clk_prepare_enable(hub->clk_dsc); - if (err < 0) - goto disable_disp; - - err = clk_prepare_enable(hub->clk_hub); - if (err < 0) - goto disable_dsc; + for (i = 0; i < hub->soc->num_wgrps; i++) { + struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - for (i = 0; i < hub->num_heads; i++) { - err = clk_prepare_enable(hub->clk_heads[i]); - if (err < 0) - goto disable_heads; + mutex_destroy(&wgrp->lock); } - err = reset_control_deassert(hub->rst); - if (err < 0) - goto disable_heads; - - return 0; - -disable_heads: - while (i--) - clk_disable_unprepare(hub->clk_heads[i]); + pm_runtime_disable(&pdev->dev); - clk_disable_unprepare(hub->clk_hub); -disable_dsc: - clk_disable_unprepare(hub->clk_dsc); -disable_disp: - clk_disable_unprepare(hub->clk_disp); return err; } -static const struct dev_pm_ops tegra_display_hub_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_display_hub_suspend, - tegra_display_hub_resume, NULL) -}; - static const struct tegra_display_hub_soc tegra186_display_hub = { .num_wgrps = 6, .supports_dsc = true, @@ -958,7 +997,6 @@ struct platform_driver tegra_display_hub_driver = { .driver = { .name = "tegra-display-hub", .of_match_table = tegra_display_hub_of_match, - .pm = &tegra_display_hub_pm_ops, }, .probe = tegra_display_hub_probe, .remove = tegra_display_hub_remove, diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h index 767a60d9313c..3efa1be07ff8 100644 --- a/drivers/gpu/drm/tegra/hub.h +++ b/drivers/gpu/drm/tegra/hub.h @@ -17,7 +17,7 @@ struct tegra_windowgroup { struct mutex lock; unsigned int index; - struct device *parent; + struct host1x_client *parent; struct reset_control *rst; }; diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 80ddde4adbae..a264259b97a2 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -250,3 +250,19 @@ void tegra_output_find_possible_crtcs(struct tegra_output *output, output->encoder.possible_crtcs = mask; } + +int tegra_output_suspend(struct tegra_output *output) +{ + if (output->hpd_irq) + disable_irq(output->hpd_irq); + + return 0; +} + +int tegra_output_resume(struct tegra_output *output) +{ + if (output->hpd_irq) + enable_irq(output->hpd_irq); + + return 0; +} diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 1b8087d2dafe..41d24949478e 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -2255,7 +2255,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder) if (err < 0) dev_err(sor->dev, "failed to power off I/O pad: %d\n", err); - pm_runtime_put(sor->dev); + host1x_client_suspend(&sor->client); } static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) @@ -2276,7 +2276,11 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) mode = &encoder->crtc->state->adjusted_mode; pclk = mode->clock * 1000; - 
pm_runtime_get_sync(sor->dev); + err = host1x_client_resume(&sor->client); + if (err < 0) { + dev_err(sor->dev, "failed to resume: %d\n", err); + return; + } /* switch to safe parent clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_safe); @@ -2722,7 +2726,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder) if (output->panel) drm_panel_unprepare(output->panel); - pm_runtime_put(sor->dev); + host1x_client_suspend(&sor->client); } static void tegra_sor_dp_enable(struct drm_encoder *encoder) @@ -2742,7 +2746,11 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder) mode = &encoder->crtc->state->adjusted_mode; info = &output->connector.display_info; - pm_runtime_get_sync(sor->dev); + err = host1x_client_resume(&sor->client); + if (err < 0) { + dev_err(sor->dev, "failed to resume: %d\n", err); + return; + } /* switch to safe parent clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_safe); @@ -3053,7 +3061,7 @@ static const struct tegra_sor_ops tegra_sor_dp_ops = { static int tegra_sor_init(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); const struct drm_encoder_helper_funcs *helpers = NULL; struct tegra_sor *sor = host1x_client_to_sor(client); int connector = DRM_MODE_CONNECTOR_Unknown; @@ -3190,9 +3198,80 @@ static int tegra_sor_exit(struct host1x_client *client) return 0; } +static int tegra_sor_runtime_suspend(struct host1x_client *client) +{ + struct tegra_sor *sor = host1x_client_to_sor(client); + struct device *dev = client->dev; + int err; + + if (sor->rst) { + err = reset_control_assert(sor->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + + reset_control_release(sor->rst); + } + + usleep_range(1000, 2000); + + clk_disable_unprepare(sor->clk); + pm_runtime_put_sync(dev); + + return 0; +} + +static int tegra_sor_runtime_resume(struct host1x_client *client) +{ + struct tegra_sor *sor = host1x_client_to_sor(client); + struct device *dev = client->dev; + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get runtime PM: %d\n", err); + return err; + } + + err = clk_prepare_enable(sor->clk); + if (err < 0) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto put_rpm; + } + + usleep_range(1000, 2000); + + if (sor->rst) { + err = reset_control_acquire(sor->rst); + if (err < 0) { + dev_err(dev, "failed to acquire reset: %d\n", err); + goto disable_clk; + } + + err = reset_control_deassert(sor->rst); + if (err < 0) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto release_reset; + } + } + + return 0; + +release_reset: + reset_control_release(sor->rst); +disable_clk: + clk_disable_unprepare(sor->clk); +put_rpm: + pm_runtime_put_sync(dev); + return err; +} + static const struct host1x_client_ops sor_client_ops = { .init = tegra_sor_init, .exit = tegra_sor_exit, + .suspend = tegra_sor_runtime_suspend, + .resume = tegra_sor_runtime_resume, }; static const u8 tegra124_sor_xbar_cfg[5] = { @@ -3843,10 +3922,9 @@ static int tegra_sor_probe(struct platform_device *pdev) if (!sor->clk_pad) { char *name; - err = pm_runtime_get_sync(&pdev->dev); + err = host1x_client_resume(&sor->client); if (err < 0) { - dev_err(&pdev->dev, "failed to get runtime PM: %d\n", - err); + dev_err(sor->dev, "failed to resume: %d\n", err); goto remove; } @@ -3857,7 +3935,7 @@ static int tegra_sor_probe(struct platform_device *pdev) } sor->clk_pad = tegra_clk_sor_pad_register(sor, name); - 
pm_runtime_put(&pdev->dev); + host1x_client_suspend(&sor->client); } if (IS_ERR(sor->clk_pad)) { @@ -3913,54 +3991,21 @@ static int tegra_sor_remove(struct platform_device *pdev) return 0; } -static int tegra_sor_runtime_suspend(struct device *dev) -{ - struct tegra_sor *sor = dev_get_drvdata(dev); - int err; - - if (sor->rst) { - err = reset_control_assert(sor->rst); - if (err < 0) { - dev_err(dev, "failed to assert reset: %d\n", err); - return err; - } - - reset_control_release(sor->rst); - } - - usleep_range(1000, 2000); - - clk_disable_unprepare(sor->clk); - - return 0; -} - -static int tegra_sor_runtime_resume(struct device *dev) +static int __maybe_unused tegra_sor_suspend(struct device *dev) { struct tegra_sor *sor = dev_get_drvdata(dev); int err; - err = clk_prepare_enable(sor->clk); + err = tegra_output_suspend(&sor->output); if (err < 0) { - dev_err(dev, "failed to enable clock: %d\n", err); + dev_err(dev, "failed to suspend output: %d\n", err); return err; } - usleep_range(1000, 2000); - - if (sor->rst) { - err = reset_control_acquire(sor->rst); - if (err < 0) { - dev_err(dev, "failed to acquire reset: %d\n", err); - clk_disable_unprepare(sor->clk); - return err; - } - - err = reset_control_deassert(sor->rst); + if (sor->hdmi_supply) { + err = regulator_disable(sor->hdmi_supply); if (err < 0) { - dev_err(dev, "failed to deassert reset: %d\n", err); - reset_control_release(sor->rst); - clk_disable_unprepare(sor->clk); + tegra_output_resume(&sor->output); return err; } } @@ -3968,37 +4013,31 @@ static int tegra_sor_runtime_resume(struct device *dev) return 0; } -static int tegra_sor_suspend(struct device *dev) +static int __maybe_unused tegra_sor_resume(struct device *dev) { struct tegra_sor *sor = dev_get_drvdata(dev); int err; if (sor->hdmi_supply) { - err = regulator_disable(sor->hdmi_supply); + err = regulator_enable(sor->hdmi_supply); if (err < 0) return err; } - return 0; -} + err = tegra_output_resume(&sor->output); + if (err < 0) { + dev_err(dev, "failed to resume output: %d\n", err); -static int tegra_sor_resume(struct device *dev) -{ - struct tegra_sor *sor = dev_get_drvdata(dev); - int err; + if (sor->hdmi_supply) + regulator_disable(sor->hdmi_supply); - if (sor->hdmi_supply) { - err = regulator_enable(sor->hdmi_supply); - if (err < 0) - return err; + return err; } return 0; } static const struct dev_pm_ops tegra_sor_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_sor_runtime_suspend, tegra_sor_runtime_resume, - NULL) SET_SYSTEM_SLEEP_PM_OPS(tegra_sor_suspend, tegra_sor_resume) }; diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index 3526c2892ddb..ade56b860cf9 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -161,7 +161,7 @@ static int vic_boot(struct vic *vic) static int vic_init(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); struct tegra_drm *tegra = dev->dev_private; struct vic *vic = to_vic(drm); int err; @@ -190,9 +190,9 @@ static int vic_init(struct host1x_client *client) /* * Inherit the DMA parameters (such as maximum segment size) from the - * parent device. + * parent host1x device. 
*/ - client->dev->dma_parms = client->parent->dma_parms; + client->dev->dma_parms = client->host->dma_parms; return 0; @@ -209,7 +209,7 @@ detach: static int vic_exit(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); struct tegra_drm *tegra = dev->dev_private; struct vic *vic = to_vic(drm); int err; diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 2c8559ff3481..6a995db51d6d 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -120,7 +120,7 @@ static void host1x_subdev_register(struct host1x_device *device, mutex_lock(&device->clients_lock); list_move_tail(&client->list, &device->clients); list_move_tail(&subdev->list, &device->active); - client->parent = &device->dev; + client->host = &device->dev; subdev->client = client; mutex_unlock(&device->clients_lock); mutex_unlock(&device->subdevs_lock); @@ -156,7 +156,7 @@ static void __host1x_subdev_unregister(struct host1x_device *device, */ mutex_lock(&device->clients_lock); subdev->client = NULL; - client->parent = NULL; + client->host = NULL; list_move_tail(&subdev->list, &device->subdevs); /* * XXX: Perhaps don't do this here, but rather explicitly remove it @@ -710,6 +710,10 @@ int host1x_client_register(struct host1x_client *client) struct host1x *host1x; int err; + INIT_LIST_HEAD(&client->list); + mutex_init(&client->lock); + client->usecount = 0; + mutex_lock(&devices_lock); list_for_each_entry(host1x, &devices, list) { @@ -768,3 +772,74 @@ int host1x_client_unregister(struct host1x_client *client) return 0; } EXPORT_SYMBOL(host1x_client_unregister); + +int host1x_client_suspend(struct host1x_client *client) +{ + int err = 0; + + mutex_lock(&client->lock); + + if (client->usecount == 1) { + if (client->ops && client->ops->suspend) { + err = client->ops->suspend(client); + if (err < 0) + goto unlock; + } + } + + client->usecount--; + dev_dbg(client->dev, "use count: %u\n", client->usecount); + + if (client->parent) { + err = host1x_client_suspend(client->parent); + if (err < 0) + goto resume; + } + + goto unlock; + +resume: + if (client->usecount == 0) + if (client->ops && client->ops->resume) + client->ops->resume(client); + + client->usecount++; +unlock: + mutex_unlock(&client->lock); + return err; +} +EXPORT_SYMBOL(host1x_client_suspend); + +int host1x_client_resume(struct host1x_client *client) +{ + int err = 0; + + mutex_lock(&client->lock); + + if (client->parent) { + err = host1x_client_resume(client->parent); + if (err < 0) + goto unlock; + } + + if (client->usecount == 0) { + if (client->ops && client->ops->resume) { + err = client->ops->resume(client); + if (err < 0) + goto suspend; + } + } + + client->usecount++; + dev_dbg(client->dev, "use count: %u\n", client->usecount); + + goto unlock; + +suspend: + if (client->parent) + host1x_client_suspend(client->parent); +unlock: + mutex_unlock(&client->lock); + return err; +} +EXPORT_SYMBOL(host1x_client_resume); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index a738ea55e407..388bcc2889aa 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -339,10 +339,8 @@ static int host1x_probe(struct platform_device *pdev) } syncpt_irq = platform_get_irq(pdev, 0); - if (syncpt_irq < 0) { - dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq); + if (syncpt_irq < 0) return syncpt_irq; - } mutex_init(&host->devices_lock); INIT_LIST_HEAD(&host->devices); 
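The host1x_client_suspend()/host1x_client_resume() helpers added to bus.c above keep a per-client use count and recurse into client->parent, so the display drivers bracket their register accesses with these calls instead of invoking pm_runtime_get_sync()/pm_runtime_put() on the device directly. Below is a minimal usage sketch under that assumption; the "foo" client, its register layout and the offsets used are purely illustrative and not part of this series:

#include <linux/device.h>
#include <linux/host1x.h>
#include <linux/io.h>

struct foo {
	struct host1x_client client;
	void __iomem *regs;
};

static void foo_hw_program(struct foo *foo)
{
	int err;

	/* the first resume in the tree powers up this client and its parent */
	err = host1x_client_resume(&foo->client);
	if (err < 0) {
		dev_err(foo->client.dev, "failed to resume: %d\n", err);
		return;
	}

	/* hardware is guaranteed to be powered between resume and suspend */
	writel(0x1, foo->regs + 0x0);

	/* dropping the last use count invokes the client's ->suspend() op */
	host1x_client_suspend(&foo->client);
}

The actual clock, reset and runtime PM handling lives in the client's ops->suspend/ops->resume callbacks, as in the tegra_*_runtime_suspend()/_runtime_resume() implementations above.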
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index dd1cd0142941..fce7892d5137 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -421,7 +421,7 @@ int host1x_syncpt_init(struct host1x *host) struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, unsigned long flags) { - struct host1x *host = dev_get_drvdata(client->parent->parent); + struct host1x *host = dev_get_drvdata(client->host->parent); return host1x_syncpt_alloc(host, client, flags); } diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 3c82de5f9417..9add0fd5fa6c 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -9,12 +9,54 @@ #include <linux/mailbox_controller.h> #include <linux/soc/mediatek/mtk-cmdq.h> -#define CMDQ_ARG_A_WRITE_MASK 0xffff #define CMDQ_WRITE_ENABLE_MASK BIT(0) +#define CMDQ_POLL_ENABLE_MASK BIT(0) #define CMDQ_EOC_IRQ_EN BIT(0) #define CMDQ_EOC_CMD ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \ << 32 | CMDQ_EOC_IRQ_EN) +struct cmdq_instruction { + union { + u32 value; + u32 mask; + }; + union { + u16 offset; + u16 event; + }; + u8 subsys; + u8 op; +}; + +int cmdq_dev_get_client_reg(struct device *dev, + struct cmdq_client_reg *client_reg, int idx) +{ + struct of_phandle_args spec; + int err; + + if (!client_reg) + return -ENOENT; + + err = of_parse_phandle_with_fixed_args(dev->of_node, + "mediatek,gce-client-reg", + 3, idx, &spec); + if (err < 0) { + dev_err(dev, + "error %d can't parse gce-client-reg property (%d)", + err, idx); + + return err; + } + + client_reg->subsys = (u8)spec.args[0]; + client_reg->offset = (u16)spec.args[1]; + client_reg->size = (u16)spec.args[2]; + of_node_put(spec.np); + + return 0; +} +EXPORT_SYMBOL(cmdq_dev_get_client_reg); + static void cmdq_client_timeout(struct timer_list *t) { struct cmdq_client *client = from_timer(client, t, timer); @@ -110,10 +152,10 @@ void cmdq_pkt_destroy(struct cmdq_pkt *pkt) } EXPORT_SYMBOL(cmdq_pkt_destroy); -static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code, - u32 arg_a, u32 arg_b) +static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, + struct cmdq_instruction inst) { - u64 *cmd_ptr; + struct cmdq_instruction *cmd_ptr; if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) { /* @@ -129,8 +171,9 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code, __func__, (u32)pkt->buf_size); return -ENOMEM; } + cmd_ptr = pkt->va_base + pkt->cmd_buf_size; - (*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b; + *cmd_ptr = inst; pkt->cmd_buf_size += CMDQ_INST_SIZE; return 0; @@ -138,24 +181,34 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code, int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value) { - u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) | - (subsys << CMDQ_SUBSYS_SHIFT); + struct cmdq_instruction inst; - return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value); + inst.op = CMDQ_CODE_WRITE; + inst.value = value; + inst.offset = offset; + inst.subsys = subsys; + + return cmdq_pkt_append_command(pkt, inst); } EXPORT_SYMBOL(cmdq_pkt_write); int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value, u32 mask) { - u32 offset_mask = offset; - int err = 0; + struct cmdq_instruction inst = { {0} }; + u16 offset_mask = offset; + int err; if (mask != 0xffffffff) { - err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask); + inst.op = 
CMDQ_CODE_MASK; + inst.mask = ~mask; + err = cmdq_pkt_append_command(pkt, inst); + if (err < 0) + return err; + offset_mask |= CMDQ_WRITE_ENABLE_MASK; } - err |= cmdq_pkt_write(pkt, subsys, offset_mask, value); + err = cmdq_pkt_write(pkt, subsys, offset_mask, value); return err; } @@ -163,43 +216,85 @@ EXPORT_SYMBOL(cmdq_pkt_write_mask); int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event) { - u32 arg_b; + struct cmdq_instruction inst = { {0} }; if (event >= CMDQ_MAX_EVENT) return -EINVAL; - /* - * WFE arg_b - * bit 0-11: wait value - * bit 15: 1 - wait, 0 - no wait - * bit 16-27: update value - * bit 31: 1 - update, 0 - no update - */ - arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE; + inst.op = CMDQ_CODE_WFE; + inst.value = CMDQ_WFE_OPTION; + inst.event = event; - return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b); + return cmdq_pkt_append_command(pkt, inst); } EXPORT_SYMBOL(cmdq_pkt_wfe); int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event) { + struct cmdq_instruction inst = { {0} }; + if (event >= CMDQ_MAX_EVENT) return -EINVAL; - return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, - CMDQ_WFE_UPDATE); + inst.op = CMDQ_CODE_WFE; + inst.value = CMDQ_WFE_UPDATE; + inst.event = event; + + return cmdq_pkt_append_command(pkt, inst); } EXPORT_SYMBOL(cmdq_pkt_clear_event); +int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, + u16 offset, u32 value) +{ + struct cmdq_instruction inst = { {0} }; + int err; + + inst.op = CMDQ_CODE_POLL; + inst.value = value; + inst.offset = offset; + inst.subsys = subsys; + err = cmdq_pkt_append_command(pkt, inst); + + return err; +} +EXPORT_SYMBOL(cmdq_pkt_poll); + +int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, + u16 offset, u32 value, u32 mask) +{ + struct cmdq_instruction inst = { {0} }; + int err; + + inst.op = CMDQ_CODE_MASK; + inst.mask = ~mask; + err = cmdq_pkt_append_command(pkt, inst); + if (err < 0) + return err; + + offset = offset | CMDQ_POLL_ENABLE_MASK; + err = cmdq_pkt_poll(pkt, subsys, offset, value); + + return err; +} +EXPORT_SYMBOL(cmdq_pkt_poll_mask); + static int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { + struct cmdq_instruction inst = { {0} }; int err; /* insert EOC and generate IRQ for each command iteration */ - err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN); + inst.op = CMDQ_CODE_EOC; + inst.value = CMDQ_EOC_IRQ_EN; + err = cmdq_pkt_append_command(pkt, inst); + if (err < 0) + return err; /* JUMP to end */ - err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS); + inst.op = CMDQ_CODE_JUMP; + inst.value = CMDQ_JUMP_PASS; + err = cmdq_pkt_append_command(pkt, inst); return err; } diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 6edeb9228c4e..62d216ff1097 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -24,16 +24,20 @@ struct iommu_group; * struct host1x_client_ops - host1x client operations * @init: host1x client initialization code * @exit: host1x client tear down code + * @suspend: host1x client suspend code + * @resume: host1x client resume code */ struct host1x_client_ops { int (*init)(struct host1x_client *client); int (*exit)(struct host1x_client *client); + int (*suspend)(struct host1x_client *client); + int (*resume)(struct host1x_client *client); }; /** * struct host1x_client - host1x client structure * @list: list node for the host1x client - * @parent: pointer to struct device representing the host1x controller + * @host: pointer to struct device representing the host1x controller * @dev: pointer to struct 
device backing this host1x client * @group: IOMMU group that this client is a member of * @ops: host1x client operations @@ -44,7 +48,7 @@ struct host1x_client_ops { */ struct host1x_client { struct list_head list; - struct device *parent; + struct device *host; struct device *dev; struct iommu_group *group; @@ -55,6 +59,10 @@ struct host1x_client { struct host1x_syncpt **syncpts; unsigned int num_syncpts; + + struct host1x_client *parent; + unsigned int usecount; + struct mutex lock; }; /* @@ -309,6 +317,9 @@ int host1x_device_exit(struct host1x_device *device); int host1x_client_register(struct host1x_client *client); int host1x_client_unregister(struct host1x_client *client); +int host1x_client_suspend(struct host1x_client *client); +int host1x_client_resume(struct host1x_client *client); + struct tegra_mipi_device; struct tegra_mipi_device *tegra_mipi_request(struct device *device); diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index e6f54ef6698b..a4dc45fbec0a 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -20,6 +20,16 @@ #define CMDQ_WFE_WAIT BIT(15) #define CMDQ_WFE_WAIT_VALUE 0x1 +/* + * WFE arg_b + * bit 0-11: wait value + * bit 15: 1 - wait, 0 - no wait + * bit 16-27: update value + * bit 31: 1 - update, 0 - no update + */ +#define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \ + CMDQ_WFE_WAIT_VALUE) + /** cmdq event maximum */ #define CMDQ_MAX_EVENT 0x3ff @@ -45,6 +55,7 @@ enum cmdq_code { CMDQ_CODE_MASK = 0x02, CMDQ_CODE_WRITE = 0x04, + CMDQ_CODE_POLL = 0x08, CMDQ_CODE_JUMP = 0x10, CMDQ_CODE_WFE = 0x20, CMDQ_CODE_EOC = 0x40, diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 9618debb9ceb..a74c1d5acdf3 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -15,6 +15,12 @@ struct cmdq_pkt; +struct cmdq_client_reg { + u8 subsys; + u16 offset; + u16 size; +}; + struct cmdq_client { spinlock_t lock; u32 pkt_cnt; @@ -25,6 +31,21 @@ struct cmdq_client { }; /** + * cmdq_dev_get_client_reg() - parse cmdq client reg from the device + * node of CMDQ client + * @dev: device of CMDQ mailbox client + * @client_reg: CMDQ client reg pointer + * @idx: the index of desired reg + * + * Return: 0 for success; else the error code is returned + * + * Help CMDQ client parsing the cmdq client reg + * from the device node of CMDQ client. + */ +int cmdq_dev_get_client_reg(struct device *dev, + struct cmdq_client_reg *client_reg, int idx); + +/** * cmdq_mbox_create() - create CMDQ mailbox client and channel * @dev: device of CMDQ mailbox client * @index: index of CMDQ mailbox channel @@ -100,6 +121,38 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); /** + * cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to + * execute an instruction that wait for a specified + * hardware register to check for the value w/o mask. + * All GCE hardware threads will be blocked by this + * instruction. 
+ * @pkt: the CMDQ packet + * @subsys: the CMDQ sub system code + * @offset: register offset from CMDQ sub system + * @value: the specified target register value + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, + u16 offset, u32 value); + +/** + * cmdq_pkt_poll_mask() - Append polling command to the CMDQ packet, ask GCE to + * execute an instruction that wait for a specified + * hardware register to check for the value w/ mask. + * All GCE hardware threads will be blocked by this + * instruction. + * @pkt: the CMDQ packet + * @subsys: the CMDQ sub system code + * @offset: register offset from CMDQ sub system + * @value: the specified target register value + * @mask: the specified target register mask + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, + u16 offset, u32 value, u32 mask); +/** * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ * packet and call back at the end of done packet * @pkt: the CMDQ packet |
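As a usage illustration for the new CMDQ helpers declared above (cmdq_dev_get_client_reg(), cmdq_pkt_write_mask() and cmdq_pkt_poll_mask()), here is a hedged sketch of how a mailbox client might append a masked write followed by a masked poll to an already-created packet. The function name, register offsets and values are placeholders, not taken from any real MediaTek component:

#include <linux/device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

static int foo_queue_update(struct device *dev, struct cmdq_pkt *pkt)
{
	struct cmdq_client_reg reg;
	int err;

	/* resolve subsys/offset from the client's "mediatek,gce-client-reg" phandle (index 0) */
	err = cmdq_dev_get_client_reg(dev, &reg, 0);
	if (err < 0)
		return err;

	/* masked write: only bits 0-7 of a (hypothetical) control register are updated */
	err = cmdq_pkt_write_mask(pkt, reg.subsys, reg.offset + 0x10, 0x1, 0xff);
	if (err < 0)
		return err;

	/* block the GCE thread until bit 0 of a (hypothetical) status register reads 1 */
	return cmdq_pkt_poll_mask(pkt, reg.subsys, reg.offset + 0x14, 0x1, 0x1);
}

The packet would then be submitted as before, e.g. via cmdq_pkt_flush_async().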