author     Thomas Zimmermann <tzimmermann@suse.de>  2025-04-15 13:12:02 +0200
committer  Thomas Zimmermann <tzimmermann@suse.de>  2025-04-15 13:12:02 +0200
commit     e12b34c57122f93ec60bddd09010c0c9cbcec9b8 (patch)
tree       d404bfd9bd3ffc7cbf95b8e5177df8df6bb3f987
parent     fec450ca15af63649e219060f37a8ec673333726 (diff)
parent     b60301774a8fe6c30b14a95104ec099290a2e904 (diff)

Merge drm/drm-next into drm-misc-next

Backmerging to get fixes from v6.15-rc2 into drm-misc-next.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
-rw-r--r--  Documentation/admin-guide/hw-vuln/index.rst | 1
-rw-r--r--  Documentation/admin-guide/hw-vuln/rsb.rst | 268
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 5
-rw-r--r--  Documentation/arch/x86/cpuinfo.rst | 69
-rw-r--r--  Documentation/filesystems/ext4/super.rst | 20
-rw-r--r--  Documentation/networking/netdevices.rst | 10
-rw-r--r--  Documentation/virt/kvm/api.rst | 723
-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  Makefile | 5
-rw-r--r--  arch/arm/configs/at91_dt_defconfig | 1
-rw-r--r--  arch/arm/configs/collie_defconfig | 1
-rw-r--r--  arch/arm/configs/davinci_all_defconfig | 1
-rw-r--r--  arch/arm/configs/dove_defconfig | 1
-rw-r--r--  arch/arm/configs/exynos_defconfig | 1
-rw-r--r--  arch/arm/configs/imx_v6_v7_defconfig | 2
-rw-r--r--  arch/arm/configs/lpc18xx_defconfig | 1
-rw-r--r--  arch/arm/configs/lpc32xx_defconfig | 1
-rw-r--r--  arch/arm/configs/milbeaut_m10v_defconfig | 2
-rw-r--r--  arch/arm/configs/mmp2_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v4t_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v5_defconfig | 1
-rw-r--r--  arch/arm/configs/mvebu_v5_defconfig | 1
-rw-r--r--  arch/arm/configs/mxs_defconfig | 1
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 3
-rw-r--r--  arch/arm/configs/orion5x_defconfig | 1
-rw-r--r--  arch/arm/configs/pxa168_defconfig | 1
-rw-r--r--  arch/arm/configs/pxa910_defconfig | 1
-rw-r--r--  arch/arm/configs/pxa_defconfig | 2
-rw-r--r--  arch/arm/configs/s5pv210_defconfig | 1
-rw-r--r--  arch/arm/configs/sama7_defconfig | 2
-rw-r--r--  arch/arm/configs/spitz_defconfig | 1
-rw-r--r--  arch/arm/configs/stm32_defconfig | 1
-rw-r--r--  arch/arm/configs/wpcm450_defconfig | 2
-rw-r--r--  arch/arm64/include/asm/esr.h | 44
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 7
-rw-r--r--  arch/arm64/include/asm/kvm_ras.h | 2
-rw-r--r--  arch/arm64/include/asm/rqspinlock.h | 2
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/fault.h | 70
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/ffa.c | 9
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mem_protect.c | 9
-rw-r--r--  arch/arm64/kvm/mmu.c | 31
-rw-r--r--  arch/arm64/tools/sysreg | 7
-rw-r--r--  arch/hexagon/configs/comet_defconfig | 3
-rw-r--r--  arch/m68k/configs/amcore_defconfig | 1
-rw-r--r--  arch/mips/configs/ath79_defconfig | 1
-rw-r--r--  arch/mips/configs/bigsur_defconfig | 1
-rw-r--r--  arch/mips/configs/fuloong2e_defconfig | 1
-rw-r--r--  arch/mips/configs/ip22_defconfig | 1
-rw-r--r--  arch/mips/configs/ip27_defconfig | 1
-rw-r--r--  arch/mips/configs/ip30_defconfig | 1
-rw-r--r--  arch/mips/configs/ip32_defconfig | 1
-rw-r--r--  arch/mips/configs/omega2p_defconfig | 1
-rw-r--r--  arch/mips/configs/rb532_defconfig | 1
-rw-r--r--  arch/mips/configs/rt305x_defconfig | 1
-rw-r--r--  arch/mips/configs/sb1250_swarm_defconfig | 1
-rw-r--r--  arch/mips/configs/vocore2_defconfig | 1
-rw-r--r--  arch/mips/configs/xway_defconfig | 1
-rw-r--r--  arch/parisc/configs/generic-32bit_defconfig | 2
-rw-r--r--  arch/parisc/configs/generic-64bit_defconfig | 1
-rw-r--r--  arch/powerpc/configs/44x/sam440ep_defconfig | 1
-rw-r--r--  arch/powerpc/configs/44x/warp_defconfig | 2
-rw-r--r--  arch/powerpc/configs/83xx/mpc832x_rdb_defconfig | 1
-rw-r--r--  arch/powerpc/configs/83xx/mpc834x_itx_defconfig | 1
-rw-r--r--  arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig | 1
-rw-r--r--  arch/powerpc/configs/83xx/mpc837x_rdb_defconfig | 1
-rw-r--r--  arch/powerpc/configs/85xx/ge_imp3a_defconfig | 2
-rw-r--r--  arch/powerpc/configs/85xx/stx_gp3_defconfig | 2
-rw-r--r--  arch/powerpc/configs/85xx/xes_mpc85xx_defconfig | 1
-rw-r--r--  arch/powerpc/configs/86xx-hw.config | 1
-rw-r--r--  arch/powerpc/configs/amigaone_defconfig | 1
-rw-r--r--  arch/powerpc/configs/chrp32_defconfig | 1
-rw-r--r--  arch/powerpc/configs/fsl-emb-nonhw.config | 1
-rw-r--r--  arch/powerpc/configs/g5_defconfig | 1
-rw-r--r--  arch/powerpc/configs/gamecube_defconfig | 1
-rw-r--r--  arch/powerpc/configs/linkstation_defconfig | 2
-rw-r--r--  arch/powerpc/configs/mpc83xx_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc866_ads_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mvme5100_defconfig | 2
-rw-r--r--  arch/powerpc/configs/pasemi_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pmac32_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc44x_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc64e_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ps3_defconfig | 2
-rw-r--r--  arch/powerpc/configs/skiroot_defconfig | 2
-rw-r--r--  arch/powerpc/configs/storcenter_defconfig | 1
-rw-r--r--  arch/powerpc/configs/wii_defconfig | 1
-rw-r--r--  arch/s390/Kconfig | 19
-rw-r--r--  arch/s390/Makefile | 2
-rw-r--r--  arch/s390/include/asm/march.h | 4
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c | 11
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_events.c | 167
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 3
-rw-r--r--  arch/s390/kernel/processor.c | 4
-rw-r--r--  arch/s390/kvm/intercept.c | 2
-rw-r--r--  arch/s390/kvm/interrupt.c | 8
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 10
-rw-r--r--  arch/s390/kvm/trace-s390.h | 4
-rw-r--r--  arch/s390/tools/gen_facilities.c | 3
-rw-r--r--  arch/sh/configs/ap325rxa_defconfig | 1
-rw-r--r--  arch/sh/configs/ecovec24_defconfig | 1
-rw-r--r--  arch/sh/configs/edosk7705_defconfig | 1
-rw-r--r--  arch/sh/configs/espt_defconfig | 1
-rw-r--r--  arch/sh/configs/hp6xx_defconfig | 2
-rw-r--r--  arch/sh/configs/kfr2r09-romimage_defconfig | 1
-rw-r--r--  arch/sh/configs/landisk_defconfig | 1
-rw-r--r--  arch/sh/configs/lboxre2_defconfig | 1
-rw-r--r--  arch/sh/configs/magicpanelr2_defconfig | 2
-rw-r--r--  arch/sh/configs/migor_defconfig | 1
-rw-r--r--  arch/sh/configs/r7780mp_defconfig | 1
-rw-r--r--  arch/sh/configs/r7785rp_defconfig | 1
-rw-r--r--  arch/sh/configs/rts7751r2d1_defconfig | 1
-rw-r--r--  arch/sh/configs/rts7751r2dplus_defconfig | 1
-rw-r--r--  arch/sh/configs/sdk7780_defconfig | 1
-rw-r--r--  arch/sh/configs/se7206_defconfig | 3
-rw-r--r--  arch/sh/configs/se7712_defconfig | 1
-rw-r--r--  arch/sh/configs/se7721_defconfig | 1
-rw-r--r--  arch/sh/configs/se7724_defconfig | 1
-rw-r--r--  arch/sh/configs/sh03_defconfig | 1
-rw-r--r--  arch/sh/configs/sh2007_defconfig | 2
-rw-r--r--  arch/sh/configs/sh7724_generic_defconfig | 1
-rw-r--r--  arch/sh/configs/sh7763rdp_defconfig | 1
-rw-r--r--  arch/sh/configs/sh7770_generic_defconfig | 1
-rw-r--r--  arch/sh/configs/titan_defconfig | 1
-rw-r--r--  arch/sparc/configs/sparc64_defconfig | 1
-rw-r--r--  arch/x86/entry/entry.S | 9
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 7
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 12
-rw-r--r--  arch/x86/include/asm/smap.h | 12
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 11
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 1
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 101
-rw-r--r--  arch/x86/kernel/cpu/resctrl/rdtgroup.c | 48
-rw-r--r--  arch/x86/kernel/e820.c | 17
-rw-r--r--  arch/x86/kernel/early_printk.c | 10
-rw-r--r--  arch/x86/kvm/cpuid.c | 8
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c | 8
-rw-r--r--  arch/x86/kvm/vmx/posted_intr.c | 37
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  arch/x86/mm/tlb.c | 6
-rw-r--r--  arch/x86/power/hibernate_asm_64.S | 4
-rw-r--r--  arch/x86/xen/enlighten.c | 10
-rw-r--r--  arch/x86/xen/enlighten_pvh.c | 19
-rw-r--r--  arch/x86/xen/setup.c | 3
-rw-r--r--  arch/x86/xen/xen-asm.S | 4
-rw-r--r--  drivers/accel/ivpu/ivpu_debugfs.c | 4
-rw-r--r--  drivers/accel/ivpu/ivpu_ipc.c | 3
-rw-r--r--  drivers/accel/ivpu/ivpu_ms.c | 24
-rw-r--r--  drivers/acpi/button.c | 2
-rw-r--r--  drivers/acpi/ec.c | 28
-rw-r--r--  drivers/acpi/pptt.c | 4
-rw-r--r--  drivers/ata/pata_pxa.c | 6
-rw-r--r--  drivers/ata/sata_sx4.c | 13
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/drbd/Kconfig | 2
-rw-r--r--  drivers/block/null_blk/main.c | 2
-rw-r--r--  drivers/block/ublk_drv.c | 85
-rw-r--r--  drivers/char/agp/intel-gtt.c | 55
-rw-r--r--  drivers/dma-buf/udmabuf.c | 2
-rw-r--r--  drivers/firmware/smccc/kvm_guest.c | 4
-rw-r--r--  drivers/gpio/TODO | 34
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 4
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 27
-rw-r--r--  drivers/gpio/gpio-zynq.c | 1
-rw-r--r--  drivers/gpio/gpiolib-devres.c | 6
-rw-r--r--  drivers/gpio/gpiolib-of.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 9
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 19
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 36
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ch7017.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ch7xxx.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ivch.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ns2501.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_sil164.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_tfp410.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c | 1222
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 102
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 635
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 45
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 919
-rw-r--r--  drivers/gpu/drm/i915/display/intel_de.h | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 134
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 746
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.h | 75
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 69
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_reset.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_rpm.c | 68
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_rpm.h | 37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_wa.c | 30
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_wa.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dkl_phy.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 59
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 68
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 48
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 73
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 325
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 211
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 64
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 623
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.h | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 623
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.h | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hti.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_load_detect.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 142
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c | 35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.c | 172
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_refclk.c | 126
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_refclk.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 140
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.c | 89
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pmdemand.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 149
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 288
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 320
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_wm.c | 171
-rw-r--r--  drivers/gpu/drm/i915/display/intel_wm.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 741
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.h | 24
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 160
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 118
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 56
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 123
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.h | 11
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 18
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_dram.c | 21
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw.c | 27
-rw-r--r--  drivers/gpu/drm/imagination/pvr_job.c | 7
-rw-r--r--  drivers/gpu/drm/imagination/pvr_queue.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c | 23
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 6
-rw-r--r--  drivers/gpu/drm/sti/Makefile | 2
-rw-r--r--  drivers/gpu/drm/tests/drm_client_modeset_test.c | 9
-rw-r--r--  drivers/gpu/drm/tests/drm_cmdline_parser_test.c | 10
-rw-r--r--  drivers/gpu/drm/tests/drm_kunit_helpers.c | 22
-rw-r--r--  drivers/gpu/drm/tests/drm_modes_test.c | 26
-rw-r--r--  drivers/gpu/drm/tests/drm_probe_helper_test.c | 8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c | 11
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 20
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c | 1
-rw-r--r--  drivers/gpu/drm/xe/Makefile | 1
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h | 76
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.c | 42
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display_rpm.c | 71
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display_wa.c | 6
-rw-r--r--  drivers/gpu/drm/xe/instructions/xe_gpu_commands.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_pc.c | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c | 108
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_ring_ops.c | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.c | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_wa_oob.rules | 2
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c | 32
-rw-r--r--  drivers/iommu/dma-iommu.c | 4
-rw-r--r--  drivers/iommu/exynos-iommu.c | 4
-rw-r--r--  drivers/iommu/intel/iommu.c | 1
-rw-r--r--  drivers/iommu/intel/irq_remapping.c | 29
-rw-r--r--  drivers/iommu/iommu.c | 6
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 27
-rw-r--r--  drivers/iommu/mtk_iommu.c | 26
-rw-r--r--  drivers/irqchip/irq-bcm2712-mip.c | 1
-rw-r--r--  drivers/irqchip/irq-sg2042-msi.c | 1
-rw-r--r--  drivers/md/Kconfig | 2
-rw-r--r--  drivers/md/persistent-data/Kconfig | 2
-rw-r--r--  drivers/mtd/inftlcore.c | 9
-rw-r--r--  drivers/mtd/nand/Makefile | 3
-rw-r--r--  drivers/mtd/nand/raw/r852.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/qos.c | 5
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c | 6
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_type.h | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 57
-rw-r--r--  drivers/net/ppp/ppp_synctty.c | 5
-rw-r--r--  drivers/nvme/host/core.c | 9
-rw-r--r--  drivers/nvme/host/multipath.c | 2
-rw-r--r--  drivers/nvme/host/tcp.c | 2
-rw-r--r--  drivers/nvme/target/fc.c | 60
-rw-r--r--  drivers/nvme/target/fcloop.c | 74
-rw-r--r--  drivers/pci/quirks.c | 12
-rw-r--r--  drivers/pwm/core.c | 13
-rw-r--r--  drivers/pwm/pwm-axi-pwmgen.c | 10
-rw-r--r--  drivers/pwm/pwm-fsl-ftm.c | 6
-rw-r--r--  drivers/pwm/pwm-mediatek.c | 8
-rw-r--r--  drivers/pwm/pwm-rcar.c | 24
-rw-r--r--  drivers/pwm/pwm-stm32.c | 12
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 16
-rw-r--r--  drivers/spi/spi-fsl-qspi.c | 8
-rw-r--r--  drivers/xen/Kconfig | 2
-rw-r--r--  drivers/xen/balloon.c | 34
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_frontend.c | 1
-rw-r--r--  fs/bcachefs/Kconfig | 7
-rw-r--r--  fs/bcachefs/bcachefs.h | 4
-rw-r--r--  fs/bcachefs/btree_journal_iter.c | 5
-rw-r--r--  fs/bcachefs/btree_node_scan.c | 6
-rw-r--r--  fs/bcachefs/btree_write_buffer.c | 8
-rw-r--r--  fs/bcachefs/checksum.c | 247
-rw-r--r--  fs/bcachefs/checksum.h | 3
-rw-r--r--  fs/bcachefs/data_update.c | 2
-rw-r--r--  fs/bcachefs/dirent.c | 4
-rw-r--r--  fs/bcachefs/fs-io-buffered.c | 17
-rw-r--r--  fs/bcachefs/io_read.c | 3
-rw-r--r--  fs/bcachefs/journal_io.c | 2
-rw-r--r--  fs/bcachefs/recovery.c | 6
-rw-r--r--  fs/bcachefs/super.c | 10
-rw-r--r--  fs/btrfs/Kconfig | 2
-rw-r--r--  fs/ceph/Kconfig | 2
-rw-r--r--  fs/erofs/Kconfig | 2
-rw-r--r--  fs/erofs/erofs_fs.h | 8
-rw-r--r--  fs/erofs/fileio.c | 2
-rw-r--r--  fs/erofs/zdata.c | 1
-rw-r--r--  fs/erofs/zmap.c | 5
-rw-r--r--  fs/ext4/block_validity.c | 5
-rw-r--r--  fs/ext4/inode.c | 75
-rw-r--r--  fs/ext4/mballoc.c | 18
-rw-r--r--  fs/ext4/namei.c | 2
-rw-r--r--  fs/gfs2/Kconfig | 1
-rw-r--r--  fs/smb/client/cifsencrypt.c | 16
-rw-r--r--  fs/smb/client/cifsglob.h | 6
-rw-r--r--  fs/smb/client/cifspdu.h | 2
-rw-r--r--  fs/smb/client/connect.c | 2
-rw-r--r--  fs/smb/client/inode.c | 25
-rw-r--r--  fs/smb/client/reparse.c | 63
-rw-r--r--  fs/smb/client/reparse.h | 5
-rw-r--r--  fs/smb/client/sess.c | 60
-rw-r--r--  fs/smb/client/smb1ops.c | 53
-rw-r--r--  fs/smb/client/smb2ops.c | 14
-rw-r--r--  fs/smb/client/smb2pdu.c | 11
-rw-r--r--  fs/smb/common/smb2pdu.h | 6
-rw-r--r--  fs/smb/server/smb_common.h | 2
-rw-r--r--  fs/xfs/Kconfig | 2
-rw-r--r--  include/drm/drm_kunit_helpers.h | 3
-rw-r--r--  include/drm/intel/intel-gtt.h | 2
-rw-r--r--  include/drm/intel/pciids.h | 1
-rw-r--r--  include/kunit/test.h | 2
-rw-r--r--  include/linux/cgroup-defs.h | 1
-rw-r--r--  include/linux/cgroup.h | 2
-rw-r--r--  include/linux/gpio/consumer.h | 1
-rw-r--r--  include/linux/hrtimer.h | 2
-rw-r--r--  include/linux/irqchip/irq-davinci-aintc.h | 27
-rw-r--r--  include/linux/kvm_host.h | 2
-rw-r--r--  include/linux/mtd/spinand.h | 2
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/perf_event.h | 1
-rw-r--r--  include/linux/rtnetlink.h | 2
-rw-r--r--  include/net/sctp/structs.h | 3
-rw-r--r--  include/net/sock.h | 40
-rw-r--r--  include/vdso/unaligned.h | 12
-rw-r--r--  io_uring/kbuf.c | 2
-rw-r--r--  io_uring/rsrc.c | 17
-rw-r--r--  io_uring/zcrx.c | 19
-rw-r--r--  io_uring/zcrx.h | 5
-rw-r--r--  kernel/bpf/queue_stack_maps.c | 35
-rw-r--r--  kernel/bpf/ringbuf.c | 17
-rw-r--r--  kernel/bpf/rqspinlock.c | 2
-rw-r--r--  kernel/cgroup/cgroup.c | 6
-rw-r--r--  kernel/cgroup/cpuset-internal.h | 1
-rw-r--r--  kernel/cgroup/cpuset.c | 401
-rw-r--r--  kernel/cgroup/rstat.c | 3
-rw-r--r--  kernel/events/core.c | 70
-rw-r--r--  kernel/events/uprobes.c | 15
-rw-r--r--  kernel/time/hrtimer.c | 2
-rw-r--r--  kernel/time/tick-common.c | 22
-rw-r--r--  kernel/trace/fprobe.c | 170
-rw-r--r--  kernel/trace/ftrace.c | 314
-rw-r--r--  kernel/trace/rv/rv.c | 7
-rw-r--r--  kernel/trace/trace.c | 7
-rw-r--r--  kernel/trace/trace_events_synth.c | 1
-rw-r--r--  kernel/trace/trace_fprobe.c | 26
-rw-r--r--  kernel/trace/trace_functions_graph.c | 11
-rw-r--r--  lib/Kconfig | 57
-rw-r--r--  net/batman-adv/Kconfig | 2
-rw-r--r--  net/ceph/Kconfig | 2
-rw-r--r--  net/core/dev.c | 11
-rw-r--r--  net/core/dev_api.c | 16
-rw-r--r--  net/core/filter.c | 80
-rw-r--r--  net/core/link_watch.c | 28
-rw-r--r--  net/core/lock_debug.c | 2
-rw-r--r--  net/core/rtnetlink.c | 17
-rw-r--r--  net/core/sock.c | 5
-rw-r--r--  net/ethtool/cmis.h | 1
-rw-r--r--  net/ethtool/cmis_cdb.c | 18
-rw-r--r--  net/ethtool/common.c | 1
-rw-r--r--  net/ethtool/ioctl.c | 2
-rw-r--r--  net/ethtool/netlink.c | 8
-rw-r--r--  net/hsr/hsr_device.c | 6
-rw-r--r--  net/ipv6/addrconf.c | 9
-rw-r--r--  net/ipv6/route.c | 8
-rw-r--r--  net/mptcp/subflow.c | 8
-rw-r--r--  net/netfilter/Kconfig | 4
-rw-r--r--  net/netfilter/ipvs/Kconfig | 2
-rw-r--r--  net/netfilter/nft_set_pipapo_avx2.c | 3
-rw-r--r--  net/openvswitch/Kconfig | 2
-rw-r--r--  net/sched/Kconfig | 2
-rw-r--r--  net/sched/cls_api.c | 66
-rw-r--r--  net/sched/sch_codel.c | 5
-rw-r--r--  net/sched/sch_drr.c | 7
-rw-r--r--  net/sched/sch_ets.c | 8
-rw-r--r--  net/sched/sch_fq_codel.c | 6
-rw-r--r--  net/sched/sch_hfsc.c | 8
-rw-r--r--  net/sched/sch_htb.c | 2
-rw-r--r--  net/sched/sch_qfq.c | 7
-rw-r--r--  net/sched/sch_sfq.c | 66
-rw-r--r--  net/sctp/Kconfig | 2
-rw-r--r--  net/sctp/socket.c | 22
-rw-r--r--  net/sctp/transport.c | 2
-rw-r--r--  net/tipc/link.c | 1
-rw-r--r--  net/tls/tls_main.c | 6
-rw-r--r--  tools/objtool/arch/x86/decode.c | 18
-rw-r--r--  tools/objtool/arch/x86/special.c | 2
-rw-r--r--  tools/objtool/check.c | 59
-rw-r--r--  tools/objtool/include/objtool/arch.h | 3
-rw-r--r--  tools/testing/kunit/kunit_parser.py | 4
-rwxr-xr-x  tools/testing/kunit/kunit_tool_test.py | 4
-rw-r--r--  tools/testing/memblock/internal.h | 6
-rw-r--r--  tools/testing/memblock/linux/mutex.h | 14
-rw-r--r--  tools/testing/selftests/.gitignore | 1
-rw-r--r--  tools/testing/selftests/bpf/config.x86_64 | 1
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/res_spin_lock.c | 7
-rw-r--r--  tools/testing/selftests/bpf/progs/res_spin_lock.c | 10
-rwxr-xr-x  tools/testing/selftests/cgroup/test_cpuset_prs.sh | 617
-rwxr-xr-x  tools/testing/selftests/drivers/net/hds.py | 33
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi-filter.tc | 177
-rw-r--r--  tools/testing/selftests/futex/functional/futex_wait_wouldblock.c | 2
-rw-r--r--  tools/testing/selftests/hid/config.common | 1
-rw-r--r--  tools/testing/selftests/kvm/Makefile.kvm | 45
-rw-r--r--  tools/testing/selftests/kvm/arm64/page_fault_test.c | 2
-rw-r--r--  tools/testing/selftests/kvm/include/arm64/processor.h | 67
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/processor.c | 60
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 5
-rw-r--r--  tools/testing/selftests/kvm/rseq_test.c | 31
-rw-r--r--  tools/testing/selftests/kvm/x86/monitor_mwait_test.c | 108
-rw-r--r--  tools/testing/selftests/mincore/mincore_selftest.c | 3
-rw-r--r--  tools/testing/selftests/net/.gitignore | 1
-rw-r--r--  tools/testing/selftests/net/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh | 18
-rwxr-xr-x  tools/testing/selftests/net/netfilter/nft_concat_range.sh | 39
-rw-r--r--  tools/testing/selftests/net/skf_net_off.c | 244
-rwxr-xr-x  tools/testing/selftests/net/skf_net_off.sh | 30
-rw-r--r--  tools/testing/selftests/net/tls.c | 36
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json | 155
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json | 36
-rw-r--r--  tools/testing/selftests/tpm2/.gitignore | 3
-rwxr-xr-x  tools/testing/selftests/tpm2/test_smoke.sh | 2
-rwxr-xr-x  tools/testing/selftests/ublk/test_stripe_04.sh | 24
-rw-r--r--  virt/kvm/Kconfig | 2
-rw-r--r--  virt/kvm/eventfd.c | 10
539 files changed, 10578 insertions, 7423 deletions
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index ff0b440ef2dc..451874b8135d 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -22,3 +22,4 @@ are configurable at compile, boot or run time.
srso
gather_data_sampling
reg-file-data-sampling
+ rsb
diff --git a/Documentation/admin-guide/hw-vuln/rsb.rst b/Documentation/admin-guide/hw-vuln/rsb.rst
new file mode 100644
index 000000000000..21dbf9cf25f8
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/rsb.rst
@@ -0,0 +1,268 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+RSB-related mitigations
+=======================
+
+.. warning::
+ Please keep this document up-to-date, otherwise you will be
+ volunteered to update it and convert it to a very long comment in
+ bugs.c!
+
+Since 2018 there have been many Spectre CVEs related to the Return Stack
+Buffer (RSB) (sometimes referred to as the Return Address Stack (RAS) or
+Return Address Predictor (RAP) on AMD).
+
+Information about these CVEs and how to mitigate them is scattered
+amongst a myriad of microarchitecture-specific documents.
+
+This document attempts to consolidate all the relevant information in
+one place and clarify the reasoning behind the current RSB-related
+mitigations. It's meant to be as concise as possible, focused only on
+the current kernel mitigations: what are the RSB-related attack vectors
+and how are they currently being mitigated?
+
+It's *not* meant to describe how the RSB mechanism operates or how the
+exploits work. More details about those can be found in the references
+below.
+
+Rather, this is basically a glorified comment, but too long to actually
+be one. So when the next CVE comes along, a kernel developer can
+quickly refer to this as a refresher to see what we're actually doing
+and why.
+
+At a high level, there are two classes of RSB attacks: RSB poisoning
+(Intel and AMD) and RSB underflow (Intel only). They must each be
+considered individually for each attack vector (and microarchitecture
+where applicable).
+
+----
+
+RSB poisoning (Intel and AMD)
+=============================
+
+SpectreRSB
+~~~~~~~~~~
+
+RSB poisoning is a technique used by SpectreRSB [#spectre-rsb]_ where
+an attacker poisons an RSB entry to cause a victim's return instruction
+to speculate to an attacker-controlled address. This can happen when
+there are unbalanced CALLs/RETs after a context switch or VMEXIT.
+
+* All attack vectors can potentially be mitigated by flushing out any
+ poisoned RSB entries using an RSB filling sequence
+ [#intel-rsb-filling]_ [#amd-rsb-filling]_ when transitioning between
+ untrusted and trusted domains (a sketch of such a sequence appears at
+ the end of this subsection). But this has a performance impact and
+ should be avoided whenever possible.
+
+ .. DANGER::
+ **FIXME**: Currently we're flushing 32 entries. However, some CPU
+ models have more than 32 entries. The loop count needs to be
+ increased for those. More detailed information is needed about RSB
+ sizes.
+
+* On context switch, the user->user mitigation requires ensuring the
+ RSB gets filled or cleared whenever IBPB gets written [#cond-ibpb]_:
+
+ * AMD:
+ On Zen 4+, IBPB (or SBPB [#amd-sbpb]_ if used) clears the RSB.
+ This is indicated by IBPB_RET in CPUID [#amd-ibpb-rsb]_.
+
+ On Zen < 4, the RSB filling sequence [#amd-rsb-filling]_ must
+ always be done in addition to IBPB [#amd-ibpb-no-rsb]_. This is
+ indicated by X86_BUG_IBPB_NO_RET.
+
+ * Intel:
+ IBPB always clears the RSB:
+
+ "Software that executed before the IBPB command cannot control
+ the predicted targets of indirect branches executed after the
+ command on the same logical processor. The term indirect branch
+ in this context includes near return instructions, so these
+ predicted targets may come from the RSB." [#intel-ibpb-rsb]_
+
+* On context switch, user->kernel attacks are prevented by SMEP. User
+ space can only insert user space addresses into the RSB. Even
+ non-canonical addresses can't be inserted due to the page gap at the
+ end of the user canonical address space reserved by TASK_SIZE_MAX.
+ A SMEP #PF at instruction fetch prevents the kernel from speculatively
+ executing user space.
+
+ * AMD:
+ "Finally, branches that are predicted as 'ret' instructions get
+ their predicted targets from the Return Address Predictor (RAP).
+ AMD recommends software use a RAP stuffing sequence (mitigation
+ V2-3 in [2]) and/or Supervisor Mode Execution Protection (SMEP)
+ to ensure that the addresses in the RAP are safe for
+ speculation. Collectively, we refer to these mitigations as "RAP
+ Protection"." [#amd-smep-rsb]_
+
+ * Intel:
+ "On processors with enhanced IBRS, an RSB overwrite sequence may
+ not suffice to prevent the predicted target of a near return
+ from using an RSB entry created in a less privileged predictor
+ mode. Software can prevent this by enabling SMEP (for
+ transitions from user mode to supervisor mode) and by having
+ IA32_SPEC_CTRL.IBRS set during VM exits." [#intel-smep-rsb]_
+
+* On VMEXIT, guest->host attacks are mitigated by eIBRS (and PBRSB
+ mitigation if needed):
+
+ * AMD:
+ "When Automatic IBRS is enabled, the internal return address
+ stack used for return address predictions is cleared on VMEXIT."
+ [#amd-eibrs-vmexit]_
+
+ * Intel:
+ "On processors with enhanced IBRS, an RSB overwrite sequence may
+ not suffice to prevent the predicted target of a near return
+ from using an RSB entry created in a less privileged predictor
+ mode. Software can prevent this by enabling SMEP (for
+ transitions from user mode to supervisor mode) and by having
+ IA32_SPEC_CTRL.IBRS set during VM exits. Processors with
+ enhanced IBRS still support the usage model where IBRS is set
+ only in the OS/VMM for OSes that enable SMEP. To do this, such
+ processors will ensure that guest behavior cannot control the
+ RSB after a VM exit once IBRS is set, even if IBRS was not set
+ at the time of the VM exit." [#intel-eibrs-vmexit]_
+
+ Note that some Intel CPUs are susceptible to Post-barrier Return
+ Stack Buffer Predictions (PBRSB) [#intel-pbrsb]_, where the last
+ CALL from the guest can be used to predict the first unbalanced RET.
+ In this case the PBRSB mitigation is needed in addition to eIBRS.
+
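+A minimal sketch of such a filling sequence, in GNU C for x86-64
+(illustrative only; the in-tree implementation is the asm macro
+__FILL_RETURN_BUFFER in arch/x86/include/asm/nospec-branch.h, and the
+loop count below assumes the 32-entry case)::
+
+    static inline void rsb_fill_sketch(void)
+    {
+            unsigned long loops = 16; /* 2 CALLs per iteration = 32 RSB entries */
+
+            asm volatile("1:  call 2f\n\t"
+                         "    int3\n"       /* speculation trap for a stale RET */
+                         "2:  call 3f\n\t"
+                         "    int3\n"
+                         "3:  dec %0\n\t"
+                         "    jnz 1b\n\t"
+                         "    add $256, %%rsp" /* 16 loops * 2 CALLs * 8 bytes */
+                         : "+r" (loops) : : "memory", "cc");
+    }
+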
+AMD RETBleed / SRSO / Branch Type Confusion
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+On AMD, poisoned RSB entries can also be created by the AMD RETBleed
+variant [#retbleed-paper]_ [#amd-btc]_ or by Speculative Return Stack
+Overflow [#amd-srso]_ (Inception [#inception-paper]_). The kernel
+protects itself by replacing every RET in the kernel with a branch to a
+single safe RET.
+
+----
+
+RSB underflow (Intel only)
+==========================
+
+RSB Alternate (RSBA) ("Intel Retbleed")
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some Intel Skylake-generation CPUs are susceptible to the Intel variant
+of RETBleed [#retbleed-paper]_ (Return Stack Buffer Underflow
+[#intel-rsbu]_). If a RET is executed when the RSB buffer is empty due
+to mismatched CALLs/RETs or returning from a deep call stack, the branch
+predictor can fall back to using the Branch Target Buffer (BTB). If a
+user forces a BTB collision then the RET can speculatively branch to a
+user-controlled address.
+
+* Note that RSB filling doesn't fully mitigate this issue. If there
+ are enough unbalanced RETs, the RSB may still underflow and fall back
+ to using a poisoned BTB entry.
+
+* On context switch, user->user underflow attacks are mitigated by the
+ conditional IBPB [#cond-ibpb]_ on context switch which effectively
+ clears the BTB:
+
+ * "The indirect branch predictor barrier (IBPB) is an indirect branch
+ control mechanism that establishes a barrier, preventing software
+ that executed before the barrier from controlling the predicted
+ targets of indirect branches executed after the barrier on the same
+ logical processor." [#intel-ibpb-btb]_
+
+* On context switch and VMEXIT, user->kernel and guest->host RSB
+ underflows are mitigated by IBRS or eIBRS:
+
+ * "Enabling IBRS (including enhanced IBRS) will mitigate the "RSBU"
+ attack demonstrated by the researchers. As previously documented,
+ Intel recommends the use of enhanced IBRS, where supported. This
+ includes any processor that enumerates RRSBA but not RRSBA_DIS_S."
+ [#intel-rsbu]_
+
+ However, note that eIBRS and IBRS do not mitigate intra-mode attacks.
+ Like RRSBA below, this is mitigated by clearing the BHB on kernel
+ entry.
+
+ As an alternative to classic IBRS, call depth tracking (combined with
+ retpolines) can be used to track kernel returns and fill the RSB when
+ it gets close to being empty.
+
+Restricted RSB Alternate (RRSBA)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some newer Intel CPUs have Restricted RSB Alternate (RRSBA) behavior,
+which, similar to RSBA described above, also falls back to using the BTB
+on RSB underflow. The only difference is that the predicted targets are
+restricted to the current domain when eIBRS is enabled:
+
+* "Restricted RSB Alternate (RRSBA) behavior allows alternate branch
+ predictors to be used by near RET instructions when the RSB is
+ empty. When eIBRS is enabled, the predicted targets of these
+ alternate predictors are restricted to those belonging to the
+ indirect branch predictor entries of the current prediction domain."
+ [#intel-eibrs-rrsba]_
+
+When a CPU with RRSBA is vulnerable to Branch History Injection
+[#bhi-paper]_ [#intel-bhi]_, an RSB underflow could be used for an
+intra-mode BTI attack. This is mitigated by clearing the BHB on
+kernel entry.
+
+However, if the kernel uses retpolines instead of eIBRS, it needs to
+disable RRSBA:
+
+* "Where software is using retpoline as a mitigation for BHI or
+ intra-mode BTI, and the processor both enumerates RRSBA and
+ enumerates RRSBA_DIS controls, it should disable this behavior."
+ [#intel-retpoline-rrsba]_
+
+----
+
+References
+==========
+
+.. [#spectre-rsb] `Spectre Returns! Speculation Attacks using the Return Stack Buffer <https://arxiv.org/pdf/1807.07940.pdf>`_
+
+.. [#intel-rsb-filling] "Empty RSB Mitigation on Skylake-generation" in `Retpoline: A Branch Target Injection Mitigation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/retpoline-branch-target-injection-mitigation.html#inpage-nav-5-1>`_
+
+.. [#amd-rsb-filling] "Mitigation V2-3" in `Software Techniques for Managing Speculation <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/software-techniques-for-managing-speculation.pdf>`_
+
+.. [#cond-ibpb] Whether IBPB is written depends on whether the prev and/or next task is protected from Spectre attacks. It typically requires opting in per task or system-wide. For more details see the documentation for the ``spectre_v2_user`` cmdline option in Documentation/admin-guide/kernel-parameters.txt.
+
+.. [#amd-sbpb] IBPB without flushing of branch type predictions. Only exists for AMD.
+
+.. [#amd-ibpb-rsb] "Function 8000_0008h -- Processor Capacity Parameters and Extended Feature Identification" in `AMD64 Architecture Programmer's Manual Volume 3: General-Purpose and System Instructions <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24594.pdf>`_. SBPB behaves the same way according to `this email <https://lore.kernel.org/5175b163a3736ca5fd01cedf406735636c99a>`_.
+
+.. [#amd-ibpb-no-rsb] `Spectre Attacks: Exploiting Speculative Execution <https://comsec.ethz.ch/wp-content/files/ibpb_sp25.pdf>`_
+
+.. [#intel-ibpb-rsb] "Introduction" in `Post-barrier Return Stack Buffer Predictions / CVE-2022-26373 / INTEL-SA-00706 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/post-barrier-return-stack-buffer-predictions.html>`_
+
+.. [#amd-smep-rsb] "Existing Mitigations" in `Technical Guidance for Mitigating Branch Type Confusion <https://www.amd.com/content/dam/amd/en/documents/resources/technical-guidance-for-mitigating-branch-type-confusion.pdf>`_
+
+.. [#intel-smep-rsb] "Enhanced IBRS" in `Indirect Branch Restricted Speculation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-restricted-speculation.html>`_
+
+.. [#amd-eibrs-vmexit] "Extended Feature Enable Register (EFER)" in `AMD64 Architecture Programmer's Manual Volume 2: System Programming <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24593.pdf>`_
+
+.. [#intel-eibrs-vmexit] "Enhanced IBRS" in `Indirect Branch Restricted Speculation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-restricted-speculation.html>`_
+
+.. [#intel-pbrsb] `Post-barrier Return Stack Buffer Predictions / CVE-2022-26373 / INTEL-SA-00706 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/post-barrier-return-stack-buffer-predictions.html>`_
+
+.. [#retbleed-paper] `RETBleed: Arbitrary Speculative Code Execution with Return Instruction <https://comsec.ethz.ch/wp-content/files/retbleed_sec22.pdf>`_
+
+.. [#amd-btc] `Technical Guidance for Mitigating Branch Type Confusion <https://www.amd.com/content/dam/amd/en/documents/resources/technical-guidance-for-mitigating-branch-type-confusion.pdf>`_
+
+.. [#amd-srso] `Technical Update Regarding Speculative Return Stack Overflow <https://www.amd.com/content/dam/amd/en/documents/corporate/cr/speculative-return-stack-overflow-whitepaper.pdf>`_
+
+.. [#inception-paper] `Inception: Exposing New Attack Surfaces with Training in Transient Execution <https://comsec.ethz.ch/wp-content/files/inception_sec23.pdf>`_
+
+.. [#intel-rsbu] `Return Stack Buffer Underflow / CVE-2022-29901, CVE-2022-28693 / INTEL-SA-00702 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/return-stack-buffer-underflow.html>`_
+
+.. [#intel-ibpb-btb] `Indirect Branch Predictor Barrier <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-predictor-barrier.html>`_
+
+.. [#intel-eibrs-rrsba] "Guidance for RSBU" in `Return Stack Buffer Underflow / CVE-2022-29901, CVE-2022-28693 / INTEL-SA-00702 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/return-stack-buffer-underflow.html>`_
+
+.. [#bhi-paper] `Branch History Injection: On the Effectiveness of Hardware Mitigations Against Cross-Privilege Spectre-v2 Attacks <http://download.vusec.net/papers/bhi-spectre-bhb_sec22.pdf>`_
+
+.. [#intel-bhi] `Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html>`_
+
+.. [#intel-retpoline-rrsba] "Retpoline" in `Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html>`_
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 76e538c77e31..d9fd26b95b34 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1407,18 +1407,15 @@
earlyprintk=serial[,0x...[,baudrate]]
earlyprintk=ttySn[,baudrate]
earlyprintk=dbgp[debugController#]
+ earlyprintk=mmio32,membase[,{nocfg|baudrate}]
earlyprintk=pciserial[,force],bus:device.function[,{nocfg|baudrate}]
earlyprintk=xdbc[xhciController#]
earlyprintk=bios
- earlyprintk=mmio,membase[,{nocfg|baudrate}]
earlyprintk is useful when the kernel crashes before
the normal console is initialized. It is not enabled by
default because it has some cosmetic problems.
- Only 32-bit memory addresses are supported for "mmio"
- and "pciserial" devices.
-
Use "nocfg" to skip UART configuration, assume
BIOS/firmware has configured UART correctly.
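
For example, a serial console on a 32-bit MMIO UART could be
requested with (the membase value here is purely illustrative):

earlyprintk=mmio32,0xfe010000,115200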
diff --git a/Documentation/arch/x86/cpuinfo.rst b/Documentation/arch/x86/cpuinfo.rst
index 6ef426a52cdc..f80e2a558d2a 100644
--- a/Documentation/arch/x86/cpuinfo.rst
+++ b/Documentation/arch/x86/cpuinfo.rst
@@ -79,8 +79,9 @@ feature flags.
How are feature flags created?
==============================
-a: Feature flags can be derived from the contents of CPUID leaves.
-------------------------------------------------------------------
+Feature flags can be derived from the contents of CPUID leaves
+--------------------------------------------------------------
+
These feature definitions are organized mirroring the layout of CPUID
leaves and grouped in words with offsets as mapped in enum cpuid_leafs
in cpufeatures.h (see arch/x86/include/asm/cpufeatures.h for details).
@@ -89,8 +90,9 @@ cpufeatures.h, and if it is detected at run time, the flags will be
displayed accordingly in /proc/cpuinfo. For example, the flag "avx2"
comes from X86_FEATURE_AVX2 in cpufeatures.h.
-b: Flags can be from scattered CPUID-based features.
-----------------------------------------------------
+Flags can be from scattered CPUID-based features
+------------------------------------------------
+
Hardware features enumerated in sparsely populated CPUID leaves get
software-defined values. Still, CPUID needs to be queried to determine
if a given feature is present. This is done in init_scattered_cpuid_features().
@@ -104,8 +106,9 @@ has only one feature and would waste 31 bits of space in the x86_capability[]
array. Since there is a struct cpuinfo_x86 for each possible CPU, the wasted
memory is not trivial.
-c: Flags can be created synthetically under certain conditions for hardware features.
--------------------------------------------------------------------------------------
+Flags can be created synthetically under certain conditions for hardware features
+---------------------------------------------------------------------------------
+
Examples of conditions include whether certain features are present in
MSR_IA32_CORE_CAPS or specific CPU models are identified. If the needed
conditions are met, the features are enabled by the set_cpu_cap or
@@ -114,8 +117,8 @@ the feature X86_FEATURE_SPLIT_LOCK_DETECT will be enabled and
"split_lock_detect" will be displayed. The flag "ring3mwait" will be
displayed only when running on INTEL_XEON_PHI_[KNL|KNM] processors.
-d: Flags can represent purely software features.
-------------------------------------------------
+Flags can represent purely software features
+--------------------------------------------
+
These flags do not represent hardware features. Instead, they represent a
software feature implemented in the kernel. For example, Kernel Page Table
Isolation is a purely software feature and its feature flag X86_FEATURE_PTI is
@@ -130,14 +133,18 @@ x86_cap/bug_flags[] arrays in kernel/cpu/capflags.c. The names in the
resulting x86_cap/bug_flags[] are used to populate /proc/cpuinfo. The naming
of flags in the x86_cap/bug_flags[] are as follows:
-a: The name of the flag is from the string in X86_FEATURE_<name> by default.
-----------------------------------------------------------------------------
-By default, the flag <name> in /proc/cpuinfo is extracted from the respective
-X86_FEATURE_<name> in cpufeatures.h. For example, the flag "avx2" is from
-X86_FEATURE_AVX2.
+Flags do not appear by default in /proc/cpuinfo
+-----------------------------------------------
+
+Feature flags are omitted by default from /proc/cpuinfo as it does not make
+sense for the feature to be exposed to userspace in most cases. For example,
+X86_FEATURE_ALWAYS is defined in cpufeatures.h but that flag is an internal
+kernel feature used in the alternative runtime patching functionality. So the
+flag does not appear in /proc/cpuinfo.
+
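+For example, an entry whose comment carries no quoted name never shows
+up as a flag (sketch of the cpufeatures.h convention; see that file for
+the authoritative entries)::
+
+  #define X86_FEATURE_ALWAYS (3*32+21) /* Always-present feature */
+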
+Specify a flag name if absolutely needed
+----------------------------------------
+
-b: The naming can be overridden.
---------------------------------
If the comment on the line for the #define X86_FEATURE_* starts with a
double-quote character (""), the string inside the double-quote characters
will be the name of the flags. For example, the flag "sse4_1" comes from
@@ -148,36 +155,31 @@ needed. For instance, /proc/cpuinfo is a userspace interface and must remain
constant. If, for some reason, the naming of X86_FEATURE_<name> changes, one
shall override the new naming with the name already used in /proc/cpuinfo.
-c: The naming override can be "", which means it will not appear in /proc/cpuinfo.
-----------------------------------------------------------------------------------
-The feature shall be omitted from /proc/cpuinfo if it does not make sense for
-the feature to be exposed to userspace. For example, X86_FEATURE_ALWAYS is
-defined in cpufeatures.h but that flag is an internal kernel feature used
-in the alternative runtime patching functionality. So, its name is overridden
-with "". Its flag will not appear in /proc/cpuinfo.
-
Flags are missing when one or more of these happen
==================================================
-a: The hardware does not enumerate support for it.
---------------------------------------------------
+The hardware does not enumerate support for it
+----------------------------------------------
+
For example, when a new kernel is running on old hardware or the feature is
not enabled by boot firmware. Even if the hardware is new, there might be a
problem enabling the feature at run time, in which case the flag will not
be displayed.
-b: The kernel does not know about the flag.
--------------------------------------------
+The kernel does not know about the flag
+---------------------------------------
+
For example, when an old kernel is running on new hardware.
-c: The kernel disabled support for it at compile-time.
-------------------------------------------------------
+The kernel disabled support for it at compile-time
+--------------------------------------------------
+
For example, if 5-level-paging is not enabled when building (i.e.,
CONFIG_X86_5LEVEL is not selected) the flag "la57" will not show up [#f1]_.
Even though the feature will still be detected via CPUID, the kernel disables
it by clearing it via setup_clear_cpu_cap(X86_FEATURE_LA57).
-d: The feature is disabled at boot-time.
-----------------------------------------
+The feature is disabled at boot-time
+------------------------------------
+
A feature can be disabled either using a command-line parameter or because
it failed to be enabled. The command-line parameter clearcpuid= can be used
to disable features using the feature number as defined in
@@ -190,8 +192,9 @@ disable specific features. The list of parameters includes, but is not limited
to, nofsgsbase, nosgx, noxsave, etc. 5-level paging can also be disabled using
"no5lvl".
-e: The feature was known to be non-functional.
-----------------------------------------------
+The feature was known to be non-functional
+------------------------------------------
+
The feature was known to be non-functional because a dependency was
missing at runtime. For example, AVX flags will not show up if the XSAVE
feature is disabled since they depend on XSAVE. Another example would be broken
diff --git a/Documentation/filesystems/ext4/super.rst b/Documentation/filesystems/ext4/super.rst
index a1eb4a11a1d0..1b240661bfa3 100644
--- a/Documentation/filesystems/ext4/super.rst
+++ b/Documentation/filesystems/ext4/super.rst
@@ -328,9 +328,13 @@ The ext4 superblock is laid out as follows in
   - s_checksum_type
   - Metadata checksum algorithm type. The only valid value is 1 (crc32c).
 * - 0x176
-  - __le16
-  - s_reserved_pad
-  -
+  - __u8
+  - s_encryption_level
+  - Versioning level for encryption.
+ * - 0x177
+  - __u8
+  - s_reserved_pad
+  - Padding to the next 32-bit boundary.
 * - 0x178
   - __le64
   - s_kbytes_written
@@ -466,9 +470,13 @@ The ext4 superblock is laid out as follows in
   - s_last_error_time_hi
   - Upper 8 bits of the s_last_error_time field.
 * - 0x27A
-  - __u8
-  - s_pad[2]
-  - Zero padding.
+  - __u8
+  - s_first_error_errcode
+  -
+ * - 0x27B
+  - __u8
+  - s_last_error_errcode
+  -
 * - 0x27C
   - __le16
   - s_encoding
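
The same fields in C, roughly as they appear in struct
ext4_super_block (sketch; see fs/ext4/ext4.h for the authoritative
layout)::

	__u8	s_encryption_level;	/* 0x176: versioning level for encryption */
	__u8	s_reserved_pad;		/* 0x177: padding */
	__le64	s_kbytes_written;	/* 0x178: lifetime kilobytes written */
	/* ... */
	__u8	s_first_error_errcode;	/* 0x27A */
	__u8	s_last_error_errcode;	/* 0x27B */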
diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst
index 6c2d8945f597..eab601ab2db0 100644
--- a/Documentation/networking/netdevices.rst
+++ b/Documentation/networking/netdevices.rst
@@ -338,10 +338,11 @@ operations directly under the netdev instance lock.
Device drivers are encouraged to rely on the instance lock where possible.
For the (mostly software) drivers that need to interact with the core stack,
-there are two sets of interfaces: ``dev_xxx`` and ``netif_xxx`` (e.g.,
-``dev_set_mtu`` and ``netif_set_mtu``). The ``dev_xxx`` functions handle
-acquiring the instance lock themselves, while the ``netif_xxx`` functions
-assume that the driver has already acquired the instance lock.
+there are two sets of interfaces: ``dev_xxx``/``netdev_xxx`` and ``netif_xxx``
+(e.g., ``dev_set_mtu`` and ``netif_set_mtu``). The ``dev_xxx``/``netdev_xxx``
+functions handle acquiring the instance lock themselves, while the
+``netif_xxx`` functions assume that the driver has already acquired
+the instance lock.
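+
+For instance, a ``dev_xxx`` wrapper is conceptually just the lock
+acquisition wrapped around the ``netif_xxx`` helper, roughly (sketch
+only; see net/core/dev_api.c for the real wrappers)::
+
+    int dev_set_mtu(struct net_device *dev, int new_mtu)
+    {
+            int ret;
+
+            netdev_lock_ops(dev);   /* take the instance lock */
+            ret = netif_set_mtu(dev, new_mtu);
+            netdev_unlock_ops(dev);
+
+            return ret;
+    }
+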
Notifiers and netdev instance lock
==================================
@@ -354,6 +355,7 @@ For devices with locked ops, currently only the following notifiers are
running under the lock:
* ``NETDEV_REGISTER``
* ``NETDEV_UP``
+* ``NETDEV_CHANGE``
The following notifiers are running without the lock:
* ``NETDEV_UNREGISTER``
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 1f8625b7646a..47c7c3f92314 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -7447,6 +7447,75 @@ Unused bitfields in the bitarrays must be set to zero.
This capability connects the vcpu to an in-kernel XIVE device.
+6.76 KVM_CAP_HYPERV_SYNIC
+-------------------------
+
+:Architectures: x86
+:Target: vcpu
+
+This capability, if KVM_CHECK_EXTENSION indicates that it is
+available, means that the kernel has an implementation of the
+Hyper-V Synthetic interrupt controller (SynIC). Hyper-V SynIC is
+used to support Windows Hyper-V based guest paravirt drivers (VMBus).
+
+In order to use SynIC, it has to be activated by setting this
+capability via KVM_ENABLE_CAP ioctl on the vcpu fd. Note that this
+will disable the use of APIC hardware virtualization even if supported
+by the CPU, as it's incompatible with SynIC auto-EOI behavior.
+
+6.77 KVM_CAP_HYPERV_SYNIC2
+--------------------------
+
+:Architectures: x86
+:Target: vcpu
+
+This capability enables a newer version of Hyper-V Synthetic interrupt
+controller (SynIC). The only difference from KVM_CAP_HYPERV_SYNIC is that KVM
+doesn't clear SynIC message and event flags pages when they are enabled by
+writing to the respective MSRs.
+
+6.78 KVM_CAP_HYPERV_DIRECT_TLBFLUSH
+-----------------------------------
+
+:Architectures: x86
+:Target: vcpu
+
+This capability indicates that KVM running on top of Hyper-V hypervisor
+enables Direct TLB flush for its guests, meaning that TLB flush
+hypercalls are handled by Level 0 hypervisor (Hyper-V) bypassing KVM.
+Due to the different ABI for hypercall parameters between Hyper-V and
+KVM, enabling this capability effectively disables all hypercall
+handling by KVM (as some KVM hypercall may be mistakenly treated as TLB
+flush hypercalls by Hyper-V), so userspace should disable KVM identification
+in CPUID and expose only the Hyper-V identification. In this case, the guest
+thinks it's running on Hyper-V and uses only Hyper-V hypercalls.
+
+6.79 KVM_CAP_HYPERV_ENFORCE_CPUID
+---------------------------------
+
+:Architectures: x86
+:Target: vcpu
+
+When enabled, KVM will disable emulated Hyper-V features provided to the
+guest according to the bits in Hyper-V CPUID feature leaves. Otherwise, all
+currently implemented Hyper-V features are provided unconditionally when
+Hyper-V identification is set in the HYPERV_CPUID_INTERFACE (0x40000001)
+leaf.
+
+6.80 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
+-------------------------------------
+
+:Architectures: x86
+:Target: vcpu
+
+When enabled, KVM will disable paravirtual features provided to the
+guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf
+(0x40000001). Otherwise, a guest may use the paravirtual features
+regardless of what has actually been exposed through the CPUID leaf.
+
+.. _KVM_CAP_DIRTY_LOG_RING:
+
+
.. _cap_enable_vm:
7. Capabilities that can be enabled on VMs
@@ -7927,10 +7996,10 @@ by POWER10 processor.
7.24 KVM_CAP_VM_COPY_ENC_CONTEXT_FROM
-------------------------------------
-Architectures: x86 SEV enabled
-Type: vm
-Parameters: args[0] is the fd of the source vm
-Returns: 0 on success; ENOTTY on error
+:Architectures: x86 SEV enabled
+:Type: vm
+:Parameters: args[0] is the fd of the source vm
+:Returns: 0 on success; ENOTTY on error
This capability enables userspace to copy encryption context from the vm
indicated by the fd to the vm this is called on.
@@ -7963,24 +8032,6 @@ default.
See Documentation/arch/x86/sgx.rst for more details.
-7.26 KVM_CAP_PPC_RPT_INVALIDATE
--------------------------------
-
-:Capability: KVM_CAP_PPC_RPT_INVALIDATE
-:Architectures: ppc
-:Type: vm
-
-This capability indicates that the kernel is capable of handling
-H_RPT_INVALIDATE hcall.
-
-In order to enable the use of H_RPT_INVALIDATE in the guest,
-user space might have to advertise it for the guest. For example,
-IBM pSeries (sPAPR) guest starts using it if "hcall-rpt-invalidate" is
-present in the "ibm,hypertas-functions" device-tree property.
-
-This capability is enabled for hypervisors on platforms like POWER9
-that support radix MMU.
-
7.27 KVM_CAP_EXIT_ON_EMULATION_FAILURE
--------------------------------------
@@ -8038,24 +8089,9 @@ indicated by the fd to the VM this is called on.
This is intended to support intra-host migration of VMs between userspace VMMs,
upgrading the VMM process without interrupting the guest.
-7.30 KVM_CAP_PPC_AIL_MODE_3
--------------------------------
-
-:Capability: KVM_CAP_PPC_AIL_MODE_3
-:Architectures: ppc
-:Type: vm
-
-This capability indicates that the kernel supports the mode 3 setting for the
-"Address Translation Mode on Interrupt" aka "Alternate Interrupt Location"
-resource that is controlled with the H_SET_MODE hypercall.
-
-This capability allows a guest kernel to use a better-performance mode for
-handling interrupts and system calls.
-
7.31 KVM_CAP_DISABLE_QUIRKS2
----------------------------
-:Capability: KVM_CAP_DISABLE_QUIRKS2
:Parameters: args[0] - set of KVM quirks to disable
:Architectures: x86
:Type: vm
This capability aims to mitigate the threat that malicious VMs can
cause the CPU to get stuck (because event windows don't open up) and
make the CPU unavailable to the host or other VMs.
-7.34 KVM_CAP_MEMORY_FAULT_INFO
-------------------------------
-
-:Architectures: x86
-:Returns: Informational only, -EINVAL on direct KVM_ENABLE_CAP.
-
-The presence of this capability indicates that KVM_RUN will fill
-kvm_run.memory_fault if KVM cannot resolve a guest page fault VM-Exit, e.g. if
-there is a valid memslot but no backing VMA for the corresponding host virtual
-address.
-
-The information in kvm_run.memory_fault is valid if and only if KVM_RUN returns
-an error with errno=EFAULT or errno=EHWPOISON *and* kvm_run.exit_reason is set
-to KVM_EXIT_MEMORY_FAULT.
-
-Note: Userspaces which attempt to resolve memory faults so that they can retry
-KVM_RUN are encouraged to guard against repeatedly receiving the same
-error/annotated fault.
-
-See KVM_EXIT_MEMORY_FAULT for more information.
-
7.35 KVM_CAP_X86_APIC_BUS_CYCLES_NS
-----------------------------------
@@ -8248,19 +8263,220 @@ by KVM_CHECK_EXTENSION.
Note: Userspace is responsible for correctly configuring CPUID 0x15, a.k.a. the
core crystal clock frequency, if a non-zero CPUID 0x15 is exposed to the guest.
-7.36 KVM_CAP_X86_GUEST_MODE
-------------------------------
+7.36 KVM_CAP_DIRTY_LOG_RING/KVM_CAP_DIRTY_LOG_RING_ACQ_REL
+----------------------------------------------------------
+
+:Architectures: x86, arm64
+:Type: vm
+:Parameters: args[0] - size of the dirty log ring
+
+KVM is capable of tracking dirty memory using ring buffers that are
+mmapped into userspace; there is one dirty ring per vcpu.
+
+The dirty ring is available to userspace as an array of
+``struct kvm_dirty_gfn``. Each dirty entry is defined as::
+
+ struct kvm_dirty_gfn {
+ __u32 flags;
+ __u32 slot; /* as_id | slot_id */
+ __u64 offset;
+ };
+
+The following values are defined for the flags field to define the
+current state of the entry::
+
+ #define KVM_DIRTY_GFN_F_DIRTY BIT(0)
+ #define KVM_DIRTY_GFN_F_RESET BIT(1)
+ #define KVM_DIRTY_GFN_F_MASK 0x3
+
+Userspace should call the KVM_ENABLE_CAP ioctl right after the
+KVM_CREATE_VM ioctl to enable this capability for the new guest and to
+set the size of the rings. Enabling the capability is only allowed
+before creating any vCPU, and the size of the ring must be a power of
+two. The larger the ring buffer, the less likely the ring is full and
+the VM is forced to exit to userspace. The optimal size depends on the
+workload, but it is recommended that it be at least 64 KiB (4096 entries).
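+
+For example, a VMM might enable the ring right after creating the VM (a
+sketch; the 64 KiB size and the choice of the ACQ_REL variant are
+illustrative, and error handling is elided)::
+
+  struct kvm_enable_cap cap = {
+          .cap = KVM_CAP_DIRTY_LOG_RING_ACQ_REL,
+          .args[0] = 65536,   /* bytes; power of two, 4096 entries */
+  };
+
+  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);   /* before any KVM_CREATE_VCPU */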
+
+Just like for dirty page bitmaps, the buffer tracks writes to
+all user memory regions for which the KVM_MEM_LOG_DIRTY_PAGES flag was
+set in KVM_SET_USER_MEMORY_REGION. Once a memory region is registered
+with the flag set, userspace can start harvesting dirty pages from the
+ring buffer.
+
+An entry in the ring buffer can be unused (flag bits ``00``),
+dirty (flag bits ``01``) or harvested (flag bits ``1X``). The
+state machine for the entry is as follows::
+
+ dirtied harvested reset
+ 00 -----------> 01 -------------> 1X -------+
+ ^ |
+ | |
+ +------------------------------------------+
+
+To harvest the dirty pages, userspace accesses the mmapped ring buffer
+to read the dirty GFNs. If the flags field has the DIRTY bit set (at this
+stage the RESET bit must be cleared), then this GFN is a dirty GFN.
+Userspace should harvest this GFN and move the flags from state
+``01b`` to ``1Xb`` (bit 0 will be ignored by KVM, but bit 1 must be set
+to show that this GFN is harvested and waiting for a reset), and move
+on to the next GFN. Userspace should continue to do this until the
+flags of a GFN have the DIRTY bit cleared, meaning that it has harvested
+all the dirty GFNs that were available.
+
+Note that on weakly ordered architectures, userspace accesses to the
+ring buffer (and more specifically the 'flags' field) must be ordered,
+using load-acquire/store-release accessors when available, or any
+other memory barrier that will ensure this ordering.
+
+It's not necessary for userspace to harvest all the dirty GFNs at once.
+However, it must collect the dirty GFNs in sequence, i.e., the userspace
+program cannot skip one dirty GFN to collect the one next to it.
+
+After processing one or more entries in the ring buffer, userspace
+calls the VM ioctl KVM_RESET_DIRTY_RINGS to notify the kernel about
+it, so that the kernel will reprotect those collected GFNs.
+Therefore, the ioctl must be called *before* reading the content of
+the dirty pages.
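+
+Putting the above together, a per-vcpu harvesting pass might look like
+the following sketch (``nr_entries``, ``next`` and ``track_dirty_gfn()``
+are hypothetical VMM-side state and helpers; ``ring`` points at the
+vcpu's mmapped array of ``struct kvm_dirty_gfn``)::
+
+  void harvest_ring(struct kvm_dirty_gfn *ring, __u32 nr_entries,
+                    __u32 *next, int vm_fd)
+  {
+          for (;;) {
+                  struct kvm_dirty_gfn *e = &ring[*next % nr_entries];
+
+                  /* acquire: read flags before trusting slot/offset */
+                  if (!(__atomic_load_n(&e->flags, __ATOMIC_ACQUIRE) &
+                        KVM_DIRTY_GFN_F_DIRTY))
+                          break;  /* no more dirty GFNs to collect */
+
+                  track_dirty_gfn(e->slot, e->offset);
+
+                  /* release: move the entry from 01b to 1Xb */
+                  __atomic_store_n(&e->flags, KVM_DIRTY_GFN_F_RESET,
+                                   __ATOMIC_RELEASE);
+                  (*next)++;
+          }
+
+          /* ask the kernel to reprotect the collected GFNs */
+          ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
+  }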
+
+The dirty ring can get full. When that happens, KVM_RUN on the vcpu
+will return with exit reason KVM_EXIT_DIRTY_LOG_FULL.
+
+The dirty ring interface has one major difference compared to the
+KVM_GET_DIRTY_LOG interface: when reading the dirty ring from
+userspace, it's still possible that the kernel has not yet flushed the
+processor's dirty page buffers into the kernel buffer (with dirty
+bitmaps, the flushing is done by the KVM_GET_DIRTY_LOG ioctl). To force
+the flush, one needs to kick the vcpu out of KVM_RUN using a signal.
+The resulting vmexit ensures that all dirty GFNs are flushed to the
+dirty rings.
+
+NOTE: KVM_CAP_DIRTY_LOG_RING_ACQ_REL is the only capability that
+should be exposed by weakly ordered architectures, in order to indicate
+the additional memory ordering requirements imposed on userspace when
+reading the state of an entry and mutating it from DIRTY to HARVESTED.
+Architectures with TSO-like ordering (such as x86) are allowed to
+expose both KVM_CAP_DIRTY_LOG_RING and KVM_CAP_DIRTY_LOG_RING_ACQ_REL
+to userspace.
+
+After enabling the dirty rings, userspace needs to check for the
+KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP capability to see whether the ring
+structures can be backed by per-slot bitmaps. When this capability is
+advertised, the architecture can dirty guest pages without vcpu/ring
+context, so some of the dirty information will still be maintained in
+the bitmap structure. KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP can't be
+enabled until KVM_CAP_DIRTY_LOG_RING_ACQ_REL has been enabled, and it
+can't be enabled once any memslot has been created.
+
+Note that the bitmap here is only a backup of the ring structure. The
+use of the ring and bitmap combination is only beneficial if there is
+only a very small amount of memory that is dirtied out of vcpu/ring
+context. Otherwise, the stand-alone per-slot bitmap mechanism should be
+considered instead.
+
+To collect dirty bits in the backup bitmap, userspace can use the same
+KVM_GET_DIRTY_LOG ioctl. KVM_CLEAR_DIRTY_LOG isn't needed as long as
+generation of the dirty bits is done in a single pass. Collecting
+the dirty bitmap should be the very last thing that the VMM does before
+considering the state as complete. The VMM needs to ensure that the
+dirty state is final, so that no dirty pages are missed due to another
+ioctl ordered after the bitmap collection.
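+
+As a sketch, collecting the backup bitmap for one slot uses the usual
+dirty-log structure (``slot_id`` and ``bitmap_buf`` are assumed to be
+set up by the VMM)::
+
+  struct kvm_dirty_log log = {
+          .slot = slot_id,              /* as_id | slot_id */
+          .dirty_bitmap = bitmap_buf,   /* caller-allocated buffer */
+  };
+
+  ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);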
+
+NOTE: Several examples of using the backup bitmap: (1) save vgic/its
+tables through command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} on
+KVM device "kvm-arm-vgic-its"; (2) restore vgic/its tables through
+command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_RESTORE_TABLES} on KVM device
+"kvm-arm-vgic-its", which also restores the VGICv3 LPI pending status;
+(3) save the vgic3 pending table through the
+KVM_DEV_ARM_VGIC_{GRP_CTRL, SAVE_PENDING_TABLES} command on KVM device
+"kvm-arm-vgic-v3".
+
+7.37 KVM_CAP_PMU_CAPABILITY
+---------------------------
:Architectures: x86
-:Returns: Informational only, -EINVAL on direct KVM_ENABLE_CAP.
+:Type: vm
+:Parameters: args[0] is a bitmask of PMU virtualization capabilities.
+:Returns: 0 on success, -EINVAL when args[0] contains invalid bits
-The presence of this capability indicates that KVM_RUN will update the
-KVM_RUN_X86_GUEST_MODE bit in kvm_run.flags to indicate whether the
-vCPU was executing nested guest code when it exited.
+This capability alters PMU virtualization in KVM.
-KVM exits with the register state of either the L1 or L2 guest
-depending on which executed at the time of an exit. Userspace must
-take care to differentiate between these cases.
+Calling KVM_CHECK_EXTENSION for this capability returns a bitmask of
+PMU virtualization capabilities that can be adjusted on a VM.
+
+The argument to KVM_ENABLE_CAP is also a bitmask and selects specific
+PMU virtualization capabilities to be applied to the VM. This can
+only be invoked on a VM prior to the creation of VCPUs.
+
+At this time, KVM_PMU_CAP_DISABLE is the only capability. Setting
+this capability will disable PMU virtualization for that VM. Userspace
+should adjust CPUID leaf 0xA to reflect that the PMU is disabled.
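+
+For instance, disabling PMU virtualization for a VM might look like the
+following sketch (error handling elided)::
+
+  struct kvm_enable_cap cap = {
+          .cap = KVM_CAP_PMU_CAPABILITY,
+          .args[0] = KVM_PMU_CAP_DISABLE,
+  };
+
+  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);   /* before any KVM_CREATE_VCPU */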
+
+7.38 KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
+-------------------------------------
+
+:Architectures: x86
+:Type: vm
+:Parameters: args[0] must be 0.
+:Returns: 0 on success, -EPERM if the userspace process does not
+ have CAP_SYS_BOOT, -EINVAL if args[0] is not 0 or any vCPUs have been
+ created.
+
+This capability disables the NX huge pages mitigation for iTLB MULTIHIT.
+
+The capability has no effect if the nx_huge_pages module parameter is not set.
+
+This capability may only be set before any vCPUs are created.
+
+7.39 KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+---------------------------------------
+
+:Architectures: arm64
+:Type: vm
+:Parameters: args[0] is the new split chunk size.
+:Returns: 0 on success, -EINVAL if any memslot was already created.
+
+This capability sets the chunk size used in Eager Page Splitting.
+
+Eager Page Splitting improves the performance of dirty-logging (used
+in live migrations) when guest memory is backed by huge-pages. It
+avoids splitting huge-pages (into PAGE_SIZE pages) on fault, by doing
+it eagerly when enabling dirty logging (with the
+KVM_MEM_LOG_DIRTY_PAGES flag for a memory region), or when using
+KVM_CLEAR_DIRTY_LOG.
+
+The chunk size specifies how many pages to break at a time, using a
+single allocation for each chunk. The bigger the chunk size, the more
+pages need to be allocated ahead of time.
+
+The chunk size needs to be a valid block size. The list of acceptable
+block sizes is exposed in KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES as a
+64-bit bitmap (each bit describing a block size). The default value is
+0, which disables eager page splitting.
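+
+As an illustrative sketch, a VMM could pick the smallest advertised
+block size (interpreting bit ``n`` of the bitmap as a chunk size of
+``2^n`` is an assumption here, as is the policy of picking the lowest
+set bit)::
+
+  unsigned long sizes = ioctl(vm_fd, KVM_CHECK_EXTENSION,
+                              KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES);
+  struct kvm_enable_cap cap = {
+          .cap = KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE,
+          .args[0] = sizes ? 1UL << __builtin_ctzl(sizes) : 0,
+  };
+
+  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);   /* before creating memslots */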
+
+7.40 KVM_CAP_EXIT_HYPERCALL
+---------------------------
+
+:Architectures: x86
+:Type: vm
+
+This capability, if enabled, will cause KVM to exit to userspace
+with KVM_EXIT_HYPERCALL exit reason to process some hypercalls.
+
+Calling KVM_CHECK_EXTENSION for this capability will return a bitmask
+of hypercalls that can be configured to exit to userspace.
+Right now, the only such hypercall is KVM_HC_MAP_GPA_RANGE.
+
+The argument to KVM_ENABLE_CAP is also a bitmask, and must be a subset
+of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace
+the hypercalls whose corresponding bit is set in the argument, and
+return ENOSYS for the others.
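+
+A sketch of enabling userspace handling of KVM_HC_MAP_GPA_RANGE,
+assuming the bit position in the mask matches the hypercall number::
+
+  int mask = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_EXIT_HYPERCALL);
+
+  if (mask & (1 << KVM_HC_MAP_GPA_RANGE)) {
+          struct kvm_enable_cap cap = {
+                  .cap = KVM_CAP_EXIT_HYPERCALL,
+                  .args[0] = 1 << KVM_HC_MAP_GPA_RANGE,
+          };
+
+          ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+  }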
+
+7.41 KVM_CAP_ARM_SYSTEM_SUSPEND
+-------------------------------
+
+:Architectures: arm64
+:Type: vm
+
+When enabled, KVM will exit to userspace with KVM_EXIT_SYSTEM_EVENT of
+type KVM_SYSTEM_EVENT_SUSPEND to process the guest suspend request.
7.42 KVM_CAP_ARM_WRITABLE_IMP_ID_REGS
-------------------------------------
@@ -8297,21 +8513,6 @@ H_RANDOM hypercall backed by a hardware random-number generator.
If present, the kernel H_RANDOM handler can be enabled for guest use
with the KVM_CAP_PPC_ENABLE_HCALL capability.
-8.2 KVM_CAP_HYPERV_SYNIC
-------------------------
-
-:Architectures: x86
-
-This capability, if KVM_CHECK_EXTENSION indicates that it is
-available, means that the kernel has an implementation of the
-Hyper-V Synthetic interrupt controller(SynIC). Hyper-V SynIC is
-used to support Windows Hyper-V based guest paravirt drivers(VMBus).
-
-In order to use SynIC, it has to be activated by setting this
-capability via KVM_ENABLE_CAP ioctl on the vcpu fd. Note that this
-will disable the use of APIC hardware virtualization even if supported
-by the CPU, as it's incompatible with SynIC auto-EOI behavior.
-
8.3 KVM_CAP_PPC_MMU_RADIX
-------------------------
@@ -8362,20 +8563,6 @@ may be incompatible with the MIPS VZ ASE.
virtualization, including standard guest virtual memory segments.
== ==========================================================================
-8.6 KVM_CAP_MIPS_TE
--------------------
-
-:Architectures: mips
-
-This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
-it is available, means that the trap & emulate implementation is available to
-run guest code in user mode, even if KVM_CAP_MIPS_VZ indicates that hardware
-assisted virtualisation is also available. KVM_VM_MIPS_TE (0) must be passed
-to KVM_CREATE_VM to create a VM which utilises it.
-
-If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
-available, it means that the VM is using trap & emulate.
-
8.7 KVM_CAP_MIPS_64BIT
----------------------
@@ -8457,16 +8644,6 @@ virtual SMT modes that can be set using KVM_CAP_PPC_SMT. If bit N
(counting from the right) is set, then a virtual SMT mode of 2^N is
available.
-8.11 KVM_CAP_HYPERV_SYNIC2
---------------------------
-
-:Architectures: x86
-
-This capability enables a newer version of Hyper-V Synthetic interrupt
-controller (SynIC). The only difference with KVM_CAP_HYPERV_SYNIC is that KVM
-doesn't clear SynIC message and event flags pages when they are enabled by
-writing to the respective MSRs.
-
8.12 KVM_CAP_HYPERV_VP_INDEX
----------------------------
@@ -8481,7 +8658,6 @@ capability is absent, userspace can still query this msr's value.
-------------------------------
:Architectures: s390
-:Parameters: none
This capability indicates if the flic device will be able to get/set the
AIS states for migration via the KVM_DEV_FLIC_AISM_ALL attribute and allows
@@ -8555,21 +8731,6 @@ This capability indicates that KVM supports paravirtualized Hyper-V IPI send
hypercalls:
HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
-8.21 KVM_CAP_HYPERV_DIRECT_TLBFLUSH
------------------------------------
-
-:Architectures: x86
-
-This capability indicates that KVM running on top of Hyper-V hypervisor
-enables Direct TLB flush for its guests meaning that TLB flush
-hypercalls are handled by Level 0 hypervisor (Hyper-V) bypassing KVM.
-Due to the different ABI for hypercall parameters between Hyper-V and
-KVM, enabling this capability effectively disables all hypercall
-handling by KVM (as some KVM hypercall may be mistakenly treated as TLB
-flush hypercalls by Hyper-V) so userspace should disable KVM identification
-in CPUID and only exposes Hyper-V identification. In this case, guest
-thinks it's running on Hyper-V and only use Hyper-V hypercalls.
-
8.22 KVM_CAP_S390_VCPU_RESETS
-----------------------------
@@ -8647,142 +8808,6 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
trap and emulate MSRs that are outside of the scope of KVM as well as
limit the attack surface on KVM's MSR emulation code.
-8.28 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
--------------------------------------
-
-Architectures: x86
-
-When enabled, KVM will disable paravirtual features provided to the
-guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf
-(0x40000001). Otherwise, a guest may use the paravirtual features
-regardless of what has actually been exposed through the CPUID leaf.
-
-.. _KVM_CAP_DIRTY_LOG_RING:
-
-8.29 KVM_CAP_DIRTY_LOG_RING/KVM_CAP_DIRTY_LOG_RING_ACQ_REL
-----------------------------------------------------------
-
-:Architectures: x86, arm64
-:Parameters: args[0] - size of the dirty log ring
-
-KVM is capable of tracking dirty memory using ring buffers that are
-mmapped into userspace; there is one dirty ring per vcpu.
-
-The dirty ring is available to userspace as an array of
-``struct kvm_dirty_gfn``. Each dirty entry is defined as::
-
- struct kvm_dirty_gfn {
- __u32 flags;
- __u32 slot; /* as_id | slot_id */
- __u64 offset;
- };
-
-The following values are defined for the flags field to define the
-current state of the entry::
-
- #define KVM_DIRTY_GFN_F_DIRTY BIT(0)
- #define KVM_DIRTY_GFN_F_RESET BIT(1)
- #define KVM_DIRTY_GFN_F_MASK 0x3
-
-Userspace should call KVM_ENABLE_CAP ioctl right after KVM_CREATE_VM
-ioctl to enable this capability for the new guest and set the size of
-the rings. Enabling the capability is only allowed before creating any
-vCPU, and the size of the ring must be a power of two. The larger the
-ring buffer, the less likely the ring is full and the VM is forced to
-exit to userspace. The optimal size depends on the workload, but it is
-recommended that it be at least 64 KiB (4096 entries).
-
-Just like for dirty page bitmaps, the buffer tracks writes to
-all user memory regions for which the KVM_MEM_LOG_DIRTY_PAGES flag was
-set in KVM_SET_USER_MEMORY_REGION. Once a memory region is registered
-with the flag set, userspace can start harvesting dirty pages from the
-ring buffer.
-
-An entry in the ring buffer can be unused (flag bits ``00``),
-dirty (flag bits ``01``) or harvested (flag bits ``1X``). The
-state machine for the entry is as follows::
-
- dirtied harvested reset
- 00 -----------> 01 -------------> 1X -------+
- ^ |
- | |
- +------------------------------------------+
-
-To harvest the dirty pages, userspace accesses the mmapped ring buffer
-to read the dirty GFNs. If the flags has the DIRTY bit set (at this stage
-the RESET bit must be cleared), then it means this GFN is a dirty GFN.
-The userspace should harvest this GFN and mark the flags from state
-``01b`` to ``1Xb`` (bit 0 will be ignored by KVM, but bit 1 must be set
-to show that this GFN is harvested and waiting for a reset), and move
-on to the next GFN. The userspace should continue to do this until the
-flags of a GFN have the DIRTY bit cleared, meaning that it has harvested
-all the dirty GFNs that were available.
-
-Note that on weakly ordered architectures, userspace accesses to the
-ring buffer (and more specifically the 'flags' field) must be ordered,
-using load-acquire/store-release accessors when available, or any
-other memory barrier that will ensure this ordering.
-
-It's not necessary for userspace to harvest the all dirty GFNs at once.
-However it must collect the dirty GFNs in sequence, i.e., the userspace
-program cannot skip one dirty GFN to collect the one next to it.
-
-After processing one or more entries in the ring buffer, userspace
-calls the VM ioctl KVM_RESET_DIRTY_RINGS to notify the kernel about
-it, so that the kernel will reprotect those collected GFNs.
-Therefore, the ioctl must be called *before* reading the content of
-the dirty pages.
-
-The dirty ring can get full. When it happens, the KVM_RUN of the
-vcpu will return with exit reason KVM_EXIT_DIRTY_LOG_FULL.
-
-The dirty ring interface has a major difference comparing to the
-KVM_GET_DIRTY_LOG interface in that, when reading the dirty ring from
-userspace, it's still possible that the kernel has not yet flushed the
-processor's dirty page buffers into the kernel buffer (with dirty bitmaps, the
-flushing is done by the KVM_GET_DIRTY_LOG ioctl). To achieve that, one
-needs to kick the vcpu out of KVM_RUN using a signal. The resulting
-vmexit ensures that all dirty GFNs are flushed to the dirty rings.
-
-NOTE: KVM_CAP_DIRTY_LOG_RING_ACQ_REL is the only capability that
-should be exposed by weakly ordered architecture, in order to indicate
-the additional memory ordering requirements imposed on userspace when
-reading the state of an entry and mutating it from DIRTY to HARVESTED.
-Architecture with TSO-like ordering (such as x86) are allowed to
-expose both KVM_CAP_DIRTY_LOG_RING and KVM_CAP_DIRTY_LOG_RING_ACQ_REL
-to userspace.
-
-After enabling the dirty rings, the userspace needs to detect the
-capability of KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP to see whether the
-ring structures can be backed by per-slot bitmaps. With this capability
-advertised, it means the architecture can dirty guest pages without
-vcpu/ring context, so that some of the dirty information will still be
-maintained in the bitmap structure. KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP
-can't be enabled if the capability of KVM_CAP_DIRTY_LOG_RING_ACQ_REL
-hasn't been enabled, or any memslot has been existing.
-
-Note that the bitmap here is only a backup of the ring structure. The
-use of the ring and bitmap combination is only beneficial if there is
-only a very small amount of memory that is dirtied out of vcpu/ring
-context. Otherwise, the stand-alone per-slot bitmap mechanism needs to
-be considered.
-
-To collect dirty bits in the backup bitmap, userspace can use the same
-KVM_GET_DIRTY_LOG ioctl. KVM_CLEAR_DIRTY_LOG isn't needed as long as all
-the generation of the dirty bits is done in a single pass. Collecting
-the dirty bitmap should be the very last thing that the VMM does before
-considering the state as complete. VMM needs to ensure that the dirty
-state is final and avoid missing dirty pages from another ioctl ordered
-after the bitmap collection.
-
-NOTE: Multiple examples of using the backup bitmap: (1) save vgic/its
-tables through command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} on
-KVM device "kvm-arm-vgic-its". (2) restore vgic/its tables through
-command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_RESTORE_TABLES} on KVM device
-"kvm-arm-vgic-its". VGICv3 LPI pending status is restored. (3) save
-vgic3 pending table through KVM_DEV_ARM_VGIC_{GRP_CTRL, SAVE_PENDING_TABLES}
-command on KVM device "kvm-arm-vgic-v3".
-
8.30 KVM_CAP_XEN_HVM
--------------------
@@ -8847,10 +8872,9 @@ clearing the PVCLOCK_TSC_STABLE_BIT flag in Xen pvclock sources. This will be
done when the KVM_CAP_XEN_HVM ioctl sets the
KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE flag.
-8.31 KVM_CAP_PPC_MULTITCE
--------------------------
+8.31 KVM_CAP_SPAPR_MULTITCE
+---------------------------
-:Capability: KVM_CAP_PPC_MULTITCE
:Architectures: ppc
:Type: vm
@@ -8882,72 +8906,9 @@ This capability indicates that the KVM virtual PTP service is
supported in the host. A VMM can check whether the service is
available to the guest on migration.
-8.33 KVM_CAP_HYPERV_ENFORCE_CPUID
----------------------------------
-
-Architectures: x86
-
-When enabled, KVM will disable emulated Hyper-V features provided to the
-guest according to the bits Hyper-V CPUID feature leaves. Otherwise, all
-currently implemented Hyper-V features are provided unconditionally when
-Hyper-V identification is set in the HYPERV_CPUID_INTERFACE (0x40000001)
-leaf.
-
-8.34 KVM_CAP_EXIT_HYPERCALL
----------------------------
-
-:Capability: KVM_CAP_EXIT_HYPERCALL
-:Architectures: x86
-:Type: vm
-
-This capability, if enabled, will cause KVM to exit to userspace
-with KVM_EXIT_HYPERCALL exit reason to process some hypercalls.
-
-Calling KVM_CHECK_EXTENSION for this capability will return a bitmask
-of hypercalls that can be configured to exit to userspace.
-Right now, the only such hypercall is KVM_HC_MAP_GPA_RANGE.
-
-The argument to KVM_ENABLE_CAP is also a bitmask, and must be a subset
-of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace
-the hypercalls whose corresponding bit is in the argument, and return
-ENOSYS for the others.
-
-8.35 KVM_CAP_PMU_CAPABILITY
----------------------------
-
-:Capability: KVM_CAP_PMU_CAPABILITY
-:Architectures: x86
-:Type: vm
-:Parameters: arg[0] is bitmask of PMU virtualization capabilities.
-:Returns: 0 on success, -EINVAL when arg[0] contains invalid bits
-
-This capability alters PMU virtualization in KVM.
-
-Calling KVM_CHECK_EXTENSION for this capability returns a bitmask of
-PMU virtualization capabilities that can be adjusted on a VM.
-
-The argument to KVM_ENABLE_CAP is also a bitmask and selects specific
-PMU virtualization capabilities to be applied to the VM. This can
-only be invoked on a VM prior to the creation of VCPUs.
-
-At this time, KVM_PMU_CAP_DISABLE is the only capability. Setting
-this capability will disable PMU virtualization for that VM. Usermode
-should adjust CPUID leaf 0xA to reflect that the PMU is disabled.
-
-8.36 KVM_CAP_ARM_SYSTEM_SUSPEND
--------------------------------
-
-:Capability: KVM_CAP_ARM_SYSTEM_SUSPEND
-:Architectures: arm64
-:Type: vm
-
-When enabled, KVM will exit to userspace with KVM_EXIT_SYSTEM_EVENT of
-type KVM_SYSTEM_EVENT_SUSPEND to process the guest suspend request.
-
8.37 KVM_CAP_S390_PROTECTED_DUMP
--------------------------------
-:Capability: KVM_CAP_S390_PROTECTED_DUMP
:Architectures: s390
:Type: vm
@@ -8957,27 +8918,9 @@ PV guests. The `KVM_PV_DUMP` command is available for the
dump related UV data. Also the vcpu ioctl `KVM_S390_PV_CPU_COMMAND` is
available and supports the `KVM_PV_DUMP_CPU` subcommand.
-8.38 KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
--------------------------------------
-
-:Capability: KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
-:Architectures: x86
-:Type: vm
-:Parameters: arg[0] must be 0.
-:Returns: 0 on success, -EPERM if the userspace process does not
- have CAP_SYS_BOOT, -EINVAL if args[0] is not 0 or any vCPUs have been
- created.
-
-This capability disables the NX huge pages mitigation for iTLB MULTIHIT.
-
-The capability has no effect if the nx_huge_pages module parameter is not set.
-
-This capability may only be set before any vCPUs are created.
-
8.39 KVM_CAP_S390_CPU_TOPOLOGY
------------------------------
-:Capability: KVM_CAP_S390_CPU_TOPOLOGY
:Architectures: s390
:Type: vm
@@ -8999,37 +8942,9 @@ structure.
When getting the Modified Change Topology Report value, the attr->addr
must point to a byte where the value will be stored or retrieved from.
-8.40 KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
----------------------------------------
-
-:Capability: KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
-:Architectures: arm64
-:Type: vm
-:Parameters: arg[0] is the new split chunk size.
-:Returns: 0 on success, -EINVAL if any memslot was already created.
-
-This capability sets the chunk size used in Eager Page Splitting.
-
-Eager Page Splitting improves the performance of dirty-logging (used
-in live migrations) when guest memory is backed by huge-pages. It
-avoids splitting huge-pages (into PAGE_SIZE pages) on fault, by doing
-it eagerly when enabling dirty logging (with the
-KVM_MEM_LOG_DIRTY_PAGES flag for a memory region), or when using
-KVM_CLEAR_DIRTY_LOG.
-
-The chunk size specifies how many pages to break at a time, using a
-single allocation for each chunk. Bigger the chunk size, more pages
-need to be allocated ahead of time.
-
-The chunk size needs to be a valid block size. The list of acceptable
-block sizes is exposed in KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES as a
-64-bit bitmap (each bit describing a block size). The default value is
-0, to disable the eager page splitting.
-
8.41 KVM_CAP_VM_TYPES
---------------------
-:Capability: KVM_CAP_MEMORY_ATTRIBUTES
:Architectures: x86
:Type: system ioctl
@@ -9046,6 +8961,67 @@ Do not use KVM_X86_SW_PROTECTED_VM for "real" VMs, and especially not in
production. The behavior and effective ABI for software-protected VMs is
unstable.
+8.42 KVM_CAP_PPC_RPT_INVALIDATE
+-------------------------------
+
+:Architectures: ppc
+
+This capability indicates that the kernel is capable of handling the
+H_RPT_INVALIDATE hcall.
+
+In order to enable the use of H_RPT_INVALIDATE in the guest,
+user space might have to advertise it for the guest. For example,
+an IBM pSeries (sPAPR) guest starts using it if "hcall-rpt-invalidate" is
+present in the "ibm,hypertas-functions" device-tree property.
+
+This capability is enabled for hypervisors on platforms like POWER9
+that support radix MMU.
+
+8.43 KVM_CAP_PPC_AIL_MODE_3
+---------------------------
+
+:Architectures: ppc
+
+This capability indicates that the kernel supports the mode 3 setting for the
+"Address Translation Mode on Interrupt" aka "Alternate Interrupt Location"
+resource that is controlled with the H_SET_MODE hypercall.
+
+This capability allows a guest kernel to use a higher-performance mode for
+handling interrupts and system calls.
+
+8.44 KVM_CAP_MEMORY_FAULT_INFO
+------------------------------
+
+:Architectures: x86
+
+The presence of this capability indicates that KVM_RUN will fill
+kvm_run.memory_fault if KVM cannot resolve a guest page fault VM-Exit, e.g. if
+there is a valid memslot but no backing VMA for the corresponding host virtual
+address.
+
+The information in kvm_run.memory_fault is valid if and only if KVM_RUN returns
+an error with errno=EFAULT or errno=EHWPOISON *and* kvm_run.exit_reason is set
+to KVM_EXIT_MEMORY_FAULT.
+
+Note: Userspaces which attempt to resolve memory faults so that they can retry
+KVM_RUN are encouraged to guard against repeatedly receiving the same
+error/annotated fault.
+
+See KVM_EXIT_MEMORY_FAULT for more information.
+
+8.45 KVM_CAP_X86_GUEST_MODE
+---------------------------
+
+:Architectures: x86
+
+The presence of this capability indicates that KVM_RUN will update the
+KVM_RUN_X86_GUEST_MODE bit in kvm_run.flags to indicate whether the
+vCPU was executing nested guest code when it exited.
+
+KVM exits with the register state of either the L1 or L2 guest
+depending on which executed at the time of an exit. Userspace must
+take care to differentiate between these cases.
+
9. Known KVM API problems
=========================
@@ -9076,9 +9052,10 @@ the local APIC.
The same is true for the ``KVM_FEATURE_PV_UNHALT`` paravirtualized feature.
-CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``.
-It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
-has enabled in-kernel emulation of the local APIC.
+On older versions of Linux, CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by
+``KVM_GET_SUPPORTED_CPUID``, but it can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER``
+is present and the kernel has enabled in-kernel emulation of the local APIC.
+On newer versions, ``KVM_GET_SUPPORTED_CPUID`` does report the bit as available.
CPU topology
~~~~~~~~~~~~
diff --git a/MAINTAINERS b/MAINTAINERS
index a05d21fad0f7..251b1f3d0e92 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10152,6 +10152,8 @@ F: include/linux/gpio.h
F: include/linux/gpio/
F: include/linux/of_gpio.h
K: (devm_)?gpio_(request|free|direction|get|set)
+K: GPIOD_FLAGS_BIT_NONEXCLUSIVE
+K: devm_gpiod_unhinge
GPIO UAPI
M: Bartosz Golaszewski <brgl@bgdev.pl>
@@ -11918,13 +11920,10 @@ F: drivers/gpio/gpio-tangier.c
F: drivers/gpio/gpio-tangier.h
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
-M: Zhenyu Wang <zhenyuw.linux@gmail.com>
-M: Zhi Wang <zhi.wang.linux@gmail.com>
-L: intel-gvt-dev@lists.freedesktop.org
-L: intel-gfx@lists.freedesktop.org
-S: Supported
+R: Zhenyu Wang <zhenyuw.linux@gmail.com>
+R: Zhi Wang <zhi.wang.linux@gmail.com>
+S: Odd Fixes
W: https://github.com/intel/gvt-linux/wiki
-T: git https://github.com/intel/gvt-linux.git
F: drivers/gpu/drm/i915/gvt/
INTEL HID EVENT DRIVER
diff --git a/Makefile b/Makefile
index 38689a0c3605..c91afd55099e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -1068,6 +1068,9 @@ ifdef CONFIG_CC_IS_GCC
KBUILD_CFLAGS += -fconserve-stack
endif
+# Ensure compilers do not transform certain loops into calls to wcslen()
+KBUILD_CFLAGS += -fno-builtin-wcslen
+
# change __FILE__ to the relative path to the source directory
ifdef building_out_of_srctree
KBUILD_CPPFLAGS += $(call cc-option,-ffile-prefix-map=$(srcroot)/=)
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index f2596a1b2f7d..ff13e1ecf4bb 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -232,7 +232,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_DEV_ATMEL_AES=y
CONFIG_CRYPTO_DEV_ATMEL_TDES=y
CONFIG_CRYPTO_DEV_ATMEL_SHA=y
-CONFIG_CRC_CCITT=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_ACORN_8x8=y
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index 42cb1c854118..578c6a4af620 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -78,7 +78,6 @@ CONFIG_ROMFS_FS=y
CONFIG_NLS_DEFAULT="cp437"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_CCITT=y
CONFIG_FONTS=y
CONFIG_FONT_MINI_4x6=y
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 3474e475373a..70b8c78386f4 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -249,7 +249,6 @@ CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=m
CONFIG_DMA_CMA=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_RT_MUTEXES=y
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index b382a2e175fb..d76eb12d29a7 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -128,7 +128,6 @@ CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 7ad48fdda1da..e81a5d6c1c20 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -370,7 +370,6 @@ CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_CHACHA20_NEON=m
CONFIG_CRYPTO_DEV_EXYNOS_RNG=y
CONFIG_CRYPTO_DEV_S5P=y
-CONFIG_CRC_CCITT=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=96
CONFIG_FONTS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 297c6a7b978a..062c1eb8dd60 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -481,8 +481,6 @@ CONFIG_SECURITYFS=y
CONFIG_CRYPTO_DEV_FSL_CAAM=y
CONFIG_CRYPTO_DEV_SAHARA=y
CONFIG_CRYPTO_DEV_MXS_DCP=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_CMA_SIZE_MBYTES=64
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig
index 2aa2ac8c6507..2d489186e945 100644
--- a/arch/arm/configs/lpc18xx_defconfig
+++ b/arch/arm/configs/lpc18xx_defconfig
@@ -147,7 +147,6 @@ CONFIG_EXT2_FS=y
# CONFIG_INOTIFY_USER is not set
CONFIG_JFFS2_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_CRC_ITU_T=y
CONFIG_PRINTK_TIME=y
# CONFIG_ENABLE_MUST_CHECK is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 98e267213b21..9afccd76446b 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -179,7 +179,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/milbeaut_m10v_defconfig b/arch/arm/configs/milbeaut_m10v_defconfig
index acd16204f8d7..275ddf7a3a14 100644
--- a/arch/arm/configs/milbeaut_m10v_defconfig
+++ b/arch/arm/configs/milbeaut_m10v_defconfig
@@ -108,8 +108,6 @@ CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_AES_ARM_CE=m
CONFIG_CRYPTO_CHACHA20_NEON=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_ITU_T=m
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=64
CONFIG_PRINTK_TIME=y
diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig
index f6f9e135353e..842a989baa27 100644
--- a/arch/arm/configs/mmp2_defconfig
+++ b/arch/arm/configs/mmp2_defconfig
@@ -67,7 +67,6 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/multi_v4t_defconfig b/arch/arm/configs/multi_v4t_defconfig
index 27d650635d9b..1a86dc305523 100644
--- a/arch/arm/configs/multi_v4t_defconfig
+++ b/arch/arm/configs/multi_v4t_defconfig
@@ -91,6 +91,5 @@ CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_CRAMFS=y
CONFIG_MINIX_FS=y
-CONFIG_CRC_CCITT=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index db81862bdb93..cf6180b4296e 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -289,7 +289,6 @@ CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
-CONFIG_CRC_CCITT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig
index a518d4a2581e..23dbb80fcc2e 100644
--- a/arch/arm/configs/mvebu_v5_defconfig
+++ b/arch/arm/configs/mvebu_v5_defconfig
@@ -187,7 +187,6 @@ CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
-CONFIG_CRC_CCITT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index d8a6e43c401e..c76d66135abb 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -160,7 +160,6 @@ CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_CRYPTO_DEV_MXS_DCP=y
-CONFIG_CRC_ITU_T=m
CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 113d6dfe5243..75b326bc7830 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -706,9 +706,6 @@ CONFIG_CRYPTO_DEV_OMAP=m
CONFIG_CRYPTO_DEV_OMAP_SHAM=m
CONFIG_CRYPTO_DEV_OMAP_AES=m
CONFIG_CRYPTO_DEV_OMAP_DES=m
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC_ITU_T=y
CONFIG_DMA_CMA=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 0629b088a584..62b9c6102789 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -136,7 +136,6 @@ CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
-CONFIG_CRC_T10DIF=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/pxa168_defconfig b/arch/arm/configs/pxa168_defconfig
index ce10fe2104bf..4748c7d33cb8 100644
--- a/arch/arm/configs/pxa168_defconfig
+++ b/arch/arm/configs/pxa168_defconfig
@@ -41,7 +41,6 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/pxa910_defconfig b/arch/arm/configs/pxa910_defconfig
index 1f28aea86014..49b59c600ae1 100644
--- a/arch/arm/configs/pxa910_defconfig
+++ b/arch/arm/configs/pxa910_defconfig
@@ -50,7 +50,6 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index de0ac8f521d7..24fca8608554 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -663,8 +663,6 @@ CONFIG_CRYPTO_SHA1_ARM=m
CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM=m
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=m
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
diff --git a/arch/arm/configs/s5pv210_defconfig b/arch/arm/configs/s5pv210_defconfig
index 5dbe85c263de..02121eec3658 100644
--- a/arch/arm/configs/s5pv210_defconfig
+++ b/arch/arm/configs/s5pv210_defconfig
@@ -113,7 +113,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_CCITT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/sama7_defconfig b/arch/arm/configs/sama7_defconfig
index ea7ddf640ba7..e14720a9a5ac 100644
--- a/arch/arm/configs/sama7_defconfig
+++ b/arch/arm/configs/sama7_defconfig
@@ -227,8 +227,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_DEV_ATMEL_AES=y
CONFIG_CRYPTO_DEV_ATMEL_TDES=y
CONFIG_CRYPTO_DEV_ATMEL_SHA=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_ITU_T=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=32
CONFIG_CMA_ALIGNMENT=9
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index ac5b7a5aaff6..ffec59e3f49c 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -234,7 +234,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRC_CCITT=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index 423bb41c4225..dcd9c316072e 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -74,7 +74,6 @@ CONFIG_EXT3_FS=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
CONFIG_NLS=y
-CONFIG_CRC_ITU_T=y
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/wpcm450_defconfig b/arch/arm/configs/wpcm450_defconfig
index 5e4397f7f828..cd4b3e70ff68 100644
--- a/arch/arm/configs/wpcm450_defconfig
+++ b/arch/arm/configs/wpcm450_defconfig
@@ -191,8 +191,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
CONFIG_X509_CERTIFICATE_PARSER=y
CONFIG_PKCS7_MESSAGE_PARSER=y
CONFIG_SYSTEM_TRUSTED_KEYRING=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_ITU_T=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index d1b1a33f9a8b..e4f77757937e 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -121,6 +121,15 @@
#define ESR_ELx_FSC_SEA_TTW(n) (0x14 + (n))
#define ESR_ELx_FSC_SECC (0x18)
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
+#define ESR_ELx_FSC_ADDRSZ (0x00)
+
+/*
+ * Annoyingly, the negative levels for Address size faults aren't laid out
+ * contiguously (or in the desired order)
+ */
+#define ESR_ELx_FSC_ADDRSZ_nL(n) ((n) == -1 ? 0x25 : 0x2C)
+#define ESR_ELx_FSC_ADDRSZ_L(n) ((n) < 0 ? ESR_ELx_FSC_ADDRSZ_nL(n) : \
+ (ESR_ELx_FSC_ADDRSZ + (n)))
/* Status codes for individual page table levels */
#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + (n))
@@ -161,8 +170,6 @@
#define ESR_ELx_Xs_MASK (GENMASK_ULL(4, 0))
/* ISS field definitions for exceptions taken in to Hyp */
-#define ESR_ELx_FSC_ADDRSZ (0x00)
-#define ESR_ELx_FSC_ADDRSZ_L(n) (ESR_ELx_FSC_ADDRSZ + (n))
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
@@ -464,6 +471,39 @@ static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
(esr == ESR_ELx_FSC_ACCESS_L(0));
}
+static inline bool esr_fsc_is_addr_sz_fault(unsigned long esr)
+{
+ esr &= ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_ADDRSZ_L(3)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(2)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(1)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(0)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(-1));
+}
+
+static inline bool esr_fsc_is_sea_ttw(unsigned long esr)
+{
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_SEA_TTW(3)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(2)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(1)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(0)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(-1));
+}
+
+static inline bool esr_fsc_is_secc_ttw(unsigned long esr)
+{
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_SECC_TTW(3)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(2)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(1)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(0)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(-1));
+}
+
/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
static inline bool esr_iss_is_eretax(unsigned long esr)
{
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index d7cf66573aca..bd020fc28aa9 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -305,7 +305,12 @@ static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vc
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
- return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
+ u64 hpfar = vcpu->arch.fault.hpfar_el2;
+
+ if (unlikely(!(hpfar & HPFAR_EL2_NS)))
+ return INVALID_GPA;
+
+ return FIELD_GET(HPFAR_EL2_FIPA, hpfar) << 12;
}
static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_ras.h b/arch/arm64/include/asm/kvm_ras.h
index 87e10d9a635b..9398ade632aa 100644
--- a/arch/arm64/include/asm/kvm_ras.h
+++ b/arch/arm64/include/asm/kvm_ras.h
@@ -14,7 +14,7 @@
* Was this synchronous external abort a RAS notification?
* Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
*/
-static inline int kvm_handle_guest_sea(phys_addr_t addr, u64 esr)
+static inline int kvm_handle_guest_sea(void)
{
/* apei_claim_sea(NULL) expects to mask interrupts itself */
lockdep_assert_irqs_enabled();
diff --git a/arch/arm64/include/asm/rqspinlock.h b/arch/arm64/include/asm/rqspinlock.h
index 5b80785324b6..9ea0a74e5892 100644
--- a/arch/arm64/include/asm/rqspinlock.h
+++ b/arch/arm64/include/asm/rqspinlock.h
@@ -86,7 +86,7 @@
#endif
-#define res_smp_cond_load_acquire_timewait(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)
+#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)
#include <asm-generic/rqspinlock.h>
diff --git a/arch/arm64/kvm/hyp/include/hyp/fault.h b/arch/arm64/kvm/hyp/include/hyp/fault.h
index 17df94570f03..fc573fc767b0 100644
--- a/arch/arm64/kvm/hyp/include/hyp/fault.h
+++ b/arch/arm64/kvm/hyp/include/hyp/fault.h
@@ -12,6 +12,16 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
+static inline bool __fault_safe_to_translate(u64 esr)
+{
+ u64 fsc = esr & ESR_ELx_FSC;
+
+ if (esr_fsc_is_sea_ttw(esr) || esr_fsc_is_secc_ttw(esr))
+ return false;
+
+ return !(fsc == ESR_ELx_FSC_EXTABT && (esr & ESR_ELx_FnV));
+}
+
static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
int ret;
@@ -44,34 +54,50 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
return true;
}
-static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
+/*
+ * Checks for the conditions when HPFAR_EL2 is written, per ARM ARM R_FKLWR.
+ */
+static inline bool __hpfar_valid(u64 esr)
{
- u64 hpfar, far;
-
- far = read_sysreg_el2(SYS_FAR);
-
/*
- * The HPFAR can be invalid if the stage 2 fault did not
- * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
- * bit is clear) and one of the two following cases are true:
- * 1. The fault was due to a permission fault
- * 2. The processor carries errata 834220
+ * CPUs affected by ARM erratum #834220 may incorrectly report a
+ * stage-2 translation fault when a stage-1 permission fault occurs.
*
- * Therefore, for all non S1PTW faults where we either have a
- * permission fault or the errata workaround is enabled, we
- * resolve the IPA using the AT instruction.
+ * Re-walk the page tables to determine if a stage-1 fault actually
+ * occurred.
*/
- if (!(esr & ESR_ELx_S1PTW) &&
- (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
- esr_fsc_is_permission_fault(esr))) {
- if (!__translate_far_to_hpfar(far, &hpfar))
- return false;
- } else {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_834220) &&
+ esr_fsc_is_translation_fault(esr))
+ return false;
+
+ if (esr_fsc_is_translation_fault(esr) || esr_fsc_is_access_flag_fault(esr))
+ return true;
+
+ if ((esr & ESR_ELx_S1PTW) && esr_fsc_is_permission_fault(esr))
+ return true;
+
+ return esr_fsc_is_addr_sz_fault(esr);
+}
+
+static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
+{
+ u64 hpfar;
+
+ fault->far_el2 = read_sysreg_el2(SYS_FAR);
+ fault->hpfar_el2 = 0;
+
+ if (__hpfar_valid(esr))
hpfar = read_sysreg(hpfar_el2);
- }
+ else if (unlikely(!__fault_safe_to_translate(esr)))
+ return true;
+ else if (!__translate_far_to_hpfar(fault->far_el2, &hpfar))
+ return false;
- fault->far_el2 = far;
- fault->hpfar_el2 = hpfar;
+ /*
+ * Hijack HPFAR_EL2.NS (RES0 in Non-secure) to indicate a valid
+ * HPFAR value.
+ */
+ fault->hpfar_el2 = hpfar | HPFAR_EL2_NS;
return true;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index e433dfab882a..3369dd0c4009 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -730,10 +730,10 @@ static void do_ffa_version(struct arm_smccc_res *res,
hyp_ffa_version = ffa_req_version;
}
- if (hyp_ffa_post_init())
+ if (hyp_ffa_post_init()) {
res->a0 = FFA_RET_NOT_SUPPORTED;
- else {
- has_version_negotiated = true;
+ } else {
+ smp_store_release(&has_version_negotiated, true);
res->a0 = hyp_ffa_version;
}
unlock:
@@ -809,7 +809,8 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
if (!is_ffa_call(func_id))
return false;
- if (!has_version_negotiated && func_id != FFA_VERSION) {
+ if (func_id != FFA_VERSION &&
+ !smp_load_acquire(&has_version_negotiated)) {
ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
goto out_handled;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index f34f11c720d7..2a5284f749b4 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -578,7 +578,14 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
return;
}
- addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
+
+ /*
+ * Yikes, we couldn't resolve the fault IPA. This should reinject an
+ * abort into the host when we figure out how to do that.
+ */
+ BUG_ON(!(fault.hpfar_el2 & HPFAR_EL2_NS));
+ addr = FIELD_GET(HPFAR_EL2_FIPA, fault.hpfar_el2) << 12;
+
ret = host_stage2_idmap(addr);
BUG_ON(ret && ret != -EAGAIN);
}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 2feb6c6b63af..754f2fe0cc67 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1794,9 +1794,28 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
gfn_t gfn;
int ret, idx;
+ /* Synchronous External Abort? */
+ if (kvm_vcpu_abt_issea(vcpu)) {
+ /*
+ * For RAS the host kernel may handle this abort.
+ * There is no need to pass the error into the guest.
+ */
+ if (kvm_handle_guest_sea())
+ kvm_inject_vabt(vcpu);
+
+ return 1;
+ }
+
esr = kvm_vcpu_get_esr(vcpu);
+ /*
+ * The fault IPA should be reliable at this point as we're not dealing
+ * with an SEA.
+ */
ipa = fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
+ if (KVM_BUG_ON(ipa == INVALID_GPA, vcpu->kvm))
+ return -EFAULT;
+
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
if (esr_fsc_is_translation_fault(esr)) {
@@ -1818,18 +1837,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
}
}
- /* Synchronous External Abort? */
- if (kvm_vcpu_abt_issea(vcpu)) {
- /*
- * For RAS the host kernel may handle this abort.
- * There is no need to pass the error into the guest.
- */
- if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
- kvm_inject_vabt(vcpu);
-
- return 1;
- }
-
trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
kvm_vcpu_get_hfar(vcpu), fault_ipa);
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index f9476848a2ed..bdf044c5d11b 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -3536,3 +3536,10 @@ Field 5 F
Field 4 P
Field 3:0 Align
EndSysreg
+
+Sysreg HPFAR_EL2 3 4 6 0 4
+Field 63 NS
+Res0 62:48
+Field 47:4 FIPA
+Res0 3:0
+EndSysreg
diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig
index 469c025297c6..c6108f000288 100644
--- a/arch/hexagon/configs/comet_defconfig
+++ b/arch/hexagon/configs/comet_defconfig
@@ -72,9 +72,6 @@ CONFIG_INET=y
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_CRC_T10DIF=y
CONFIG_FRAME_WARN=0
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig
index 67a0d157122d..110279a64aa4 100644
--- a/arch/m68k/configs/amcore_defconfig
+++ b/arch/m68k/configs/amcore_defconfig
@@ -89,4 +89,3 @@ CONFIG_PANIC_ON_OOPS=y
# CONFIG_CRYPTO_ECHAINIV is not set
CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 8caa03a41327..cba0b85c6707 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -82,7 +82,6 @@ CONFIG_LEDS_GPIO=y
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_PROC_PAGE_MONITOR is not set
-CONFIG_CRC_ITU_T=m
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index fe282630b51c..8f7c36868204 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -238,7 +238,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_T10DIF=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 5ab149cd3178..114fcd67898d 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -218,4 +218,3 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 31ca93d3acc5..f1a8ccf2c459 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -326,5 +326,4 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=m
CONFIG_DEBUG_MEMORY_INIT=y
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index b8907b3d7a33..5d079941fd20 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -317,4 +317,3 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_T10DIF=m
diff --git a/arch/mips/configs/ip30_defconfig b/arch/mips/configs/ip30_defconfig
index 270181a7320a..a4524e785469 100644
--- a/arch/mips/configs/ip30_defconfig
+++ b/arch/mips/configs/ip30_defconfig
@@ -179,4 +179,3 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_T10DIF=m
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 121e7e48fa77..d8ac11427f69 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -177,7 +177,6 @@ CONFIG_CRYPTO_SERPENT=y
CONFIG_CRYPTO_TEA=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRC_T10DIF=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
diff --git a/arch/mips/configs/omega2p_defconfig b/arch/mips/configs/omega2p_defconfig
index 128f9abab7fc..e2bcdfd290a1 100644
--- a/arch/mips/configs/omega2p_defconfig
+++ b/arch/mips/configs/omega2p_defconfig
@@ -111,7 +111,6 @@ CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
-CONFIG_CRC16=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 0261969a6e45..42b161d587c7 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -155,5 +155,4 @@ CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_SQUASHFS=y
CONFIG_CRYPTO_TEST=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
index 8404e0a9d8b2..8f9701efef19 100644
--- a/arch/mips/configs/rt305x_defconfig
+++ b/arch/mips/configs/rt305x_defconfig
@@ -128,7 +128,6 @@ CONFIG_SQUASHFS=y
# CONFIG_SQUASHFS_ZLIB is not set
CONFIG_SQUASHFS_XZ=y
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRC_ITU_T=m
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set
# CONFIG_XZ_DEC_IA64 is not set
diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig
index ce855b644bb0..ae2afff00e01 100644
--- a/arch/mips/configs/sb1250_swarm_defconfig
+++ b/arch/mips/configs/sb1250_swarm_defconfig
@@ -99,4 +99,3 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
diff --git a/arch/mips/configs/vocore2_defconfig b/arch/mips/configs/vocore2_defconfig
index 917967fed45f..2a9a9b12847d 100644
--- a/arch/mips/configs/vocore2_defconfig
+++ b/arch/mips/configs/vocore2_defconfig
@@ -111,7 +111,6 @@ CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
-CONFIG_CRC16=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig
index 7b91edfe3e07..aae8497b6872 100644
--- a/arch/mips/configs/xway_defconfig
+++ b/arch/mips/configs/xway_defconfig
@@ -140,7 +140,6 @@ CONFIG_SQUASHFS=y
# CONFIG_SQUASHFS_ZLIB is not set
CONFIG_SQUASHFS_XZ=y
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRC_ITU_T=m
CONFIG_PRINTK_TIME=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index f5fffc24c3bc..5b65c9859613 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -264,8 +264,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 2487765b7be3..ecc9ffcc11cd 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -292,7 +292,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 2479ab62d12f..98221bda380d 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -91,5 +91,4 @@ CONFIG_AFFS_FS=m
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 20891c413149..5757625469c4 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -85,8 +85,6 @@ CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index 1715ff547442..b99caba8724a 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -73,6 +73,5 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_NLS_ISO8859_8=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index e65c0057147f..11163052fdba 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -80,5 +80,4 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
index 17714bf0ed40..312d39e4242c 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
@@ -72,5 +72,4 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index 58fae5131fa7..ac27f99faab8 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -75,6 +75,5 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index 6f58ee1edf1f..7beb36a41d45 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -221,8 +221,6 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_MD5=y
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig
index e7080497048d..0a42072fa23c 100644
--- a/arch/powerpc/configs/85xx/stx_gp3_defconfig
+++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig
@@ -60,8 +60,6 @@ CONFIG_CRAMFS=m
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=m
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_BDI_SWITCH=y
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index 3a6381aa9fdc..488d03ae6d6c 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -132,7 +132,6 @@ CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_HMAC=y
diff --git a/arch/powerpc/configs/86xx-hw.config b/arch/powerpc/configs/86xx-hw.config
index 0cb24b33c88e..e7bd265fae5a 100644
--- a/arch/powerpc/configs/86xx-hw.config
+++ b/arch/powerpc/configs/86xx-hw.config
@@ -5,7 +5,6 @@ CONFIG_BROADCOM_PHY=y
# CONFIG_CARDBUS is not set
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_ST=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_HMAC=y
CONFIG_DS1682=y
CONFIG_EEPROM_LEGACY=y
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index 200bb1ecb560..69ef3dc31c4b 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -106,7 +106,6 @@ CONFIG_TMPFS=y
CONFIG_AFFS_FS=m
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=m
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index fb314f75ad4b..b799c95480ae 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -110,7 +110,6 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=m
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index d6d2a458847b..2f81bc2d819e 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -15,7 +15,6 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUPS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_CRC_T10DIF=y
CONFIG_CPUSETS=y
CONFIG_CRAMFS=y
CONFIG_CRYPTO_MD4=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 9215bed53291..7e58f3e6c987 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -231,7 +231,6 @@ CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index d77eeb525366..cdd99657b71b 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -82,7 +82,6 @@ CONFIG_ROOT_NFS=y
CONFIG_CIFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index fa707de761be..b564f9e33a0d 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -125,8 +125,6 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_932=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 83c4710017e9..a815d9e5e3e8 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -97,7 +97,6 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index a0d27c59ea78..dfbdd5e8e108 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -38,4 +38,3 @@ CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC_CCITT=y
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
index d1c7fd5bf34b..fa2b3b9c5945 100644
--- a/arch/powerpc/configs/mvme5100_defconfig
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -107,8 +107,6 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_932=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_XZ_DEC=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 61993944db40..8bbf51b38480 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -159,7 +159,6 @@ CONFIG_NFSD=y
CONFIG_NFSD_V4=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index e8b3f67bf3f5..1bc3466bc909 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -276,7 +276,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 8b595f67068c..41c930f74ed4 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -90,7 +90,6 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-CONFIG_CRC_T10DIF=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRYPTO_ECB=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 4c05f4e4d505..d2e659a2d8cb 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -207,7 +207,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 2b175ddf82f0..0b48d2b776c4 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -148,8 +148,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 3086c4a12d6d..2b71a6dc399e 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -278,8 +278,6 @@ CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY=y
# CONFIG_INTEGRITY is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
-CONFIG_CRC_ITU_T=y
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_IA64 is not set
# CONFIG_XZ_DEC_ARM is not set
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index 7a978d396991..e415222bd839 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -75,4 +75,3 @@ CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_CRC_T10DIF=y
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 5017a697b67b..7c714a19221e 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -114,7 +114,6 @@ CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index db8161ebb43c..99fb986fca6e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -332,6 +332,10 @@ config HAVE_MARCH_Z16_FEATURES
def_bool n
select HAVE_MARCH_Z15_FEATURES
+config HAVE_MARCH_Z17_FEATURES
+ def_bool n
+ select HAVE_MARCH_Z16_FEATURES
+
choice
prompt "Processor type"
default MARCH_Z196
@@ -397,6 +401,14 @@ config MARCH_Z16
Select this to enable optimizations for IBM z16 (3931 and
3932 series).
+config MARCH_Z17
+ bool "IBM z17"
+ select HAVE_MARCH_Z17_FEATURES
+ depends on $(cc-option,-march=z17)
+ help
+ Select this to enable optimizations for IBM z17 (9175 and
+ 9176 series).
+
endchoice
config MARCH_Z10_TUNE
@@ -420,6 +432,9 @@ config MARCH_Z15_TUNE
config MARCH_Z16_TUNE
def_bool TUNE_Z16 || MARCH_Z16 && TUNE_DEFAULT
+config MARCH_Z17_TUNE
+ def_bool TUNE_Z17 || MARCH_Z17 && TUNE_DEFAULT
+
choice
prompt "Tune code generation"
default TUNE_DEFAULT
@@ -464,6 +479,10 @@ config TUNE_Z16
bool "IBM z16"
depends on $(cc-option,-mtune=z16)
+config TUNE_Z17
+ bool "IBM z17"
+ depends on $(cc-option,-mtune=z17)
+
endchoice
config 64BIT
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index b06dc53bfed5..7679bc16b692 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -48,6 +48,7 @@ mflags-$(CONFIG_MARCH_Z13) := -march=z13
mflags-$(CONFIG_MARCH_Z14) := -march=z14
mflags-$(CONFIG_MARCH_Z15) := -march=z15
mflags-$(CONFIG_MARCH_Z16) := -march=z16
+mflags-$(CONFIG_MARCH_Z17) := -march=z17
export CC_FLAGS_MARCH := $(mflags-y)
@@ -61,6 +62,7 @@ cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
cflags-$(CONFIG_MARCH_Z14_TUNE) += -mtune=z14
cflags-$(CONFIG_MARCH_Z15_TUNE) += -mtune=z15
cflags-$(CONFIG_MARCH_Z16_TUNE) += -mtune=z16
+cflags-$(CONFIG_MARCH_Z17_TUNE) += -mtune=z17
cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
diff --git a/arch/s390/include/asm/march.h b/arch/s390/include/asm/march.h
index fd9eef3be44c..11a71bd14954 100644
--- a/arch/s390/include/asm/march.h
+++ b/arch/s390/include/asm/march.h
@@ -33,6 +33,10 @@
#define MARCH_HAS_Z16_FEATURES 1
#endif
+#ifdef CONFIG_HAVE_MARCH_Z17_FEATURES
+#define MARCH_HAS_Z17_FEATURES 1
+#endif
+
#endif /* __DECOMPRESSOR */
#endif /* __ASM_S390_MARCH_H */
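The new MARCH_HAS_Z17_FEATURES define gives common s390 code a compile-time switch on the minimum build architecture, mirroring the existing Z16 macro. A minimal sketch of how a consumer could gate on it (the helper below is hypothetical, not part of this patch):

#include <asm/march.h>

/* Hypothetical helper: true when the kernel is built for z17 or newer,
 * e.g. to assume miscellaneous-instruction-extension 4 is available. */
static inline bool built_for_z17(void)
{
#ifdef MARCH_HAS_Z17_FEATURES
	return true;
#else
	return false;
#endif
}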
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 33205dd410e4..e657fad7e376 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -442,7 +442,7 @@ static void cpum_cf_make_setsize(enum cpumf_ctr_set ctrset)
ctrset_size = 48;
else if (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5)
ctrset_size = 128;
- else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
+ else if (cpumf_ctr_info.csvn >= 6 && cpumf_ctr_info.csvn <= 8)
ctrset_size = 160;
break;
case CPUMF_CTR_SET_MT_DIAG:
@@ -858,18 +858,13 @@ static int cpumf_pmu_event_type(struct perf_event *event)
static int cpumf_pmu_event_init(struct perf_event *event)
{
unsigned int type = event->attr.type;
- int err;
+ int err = -ENOENT;
if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
err = __hw_perf_event_init(event, type);
else if (event->pmu->type == type)
/* Registered as unknown PMU */
err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
- else
- return -ENOENT;
-
- if (unlikely(err) && event->destroy)
- event->destroy(event);
return err;
}
@@ -1819,8 +1814,6 @@ static int cfdiag_event_init(struct perf_event *event)
event->destroy = hw_perf_event_destroy;
err = cfdiag_event_init2(event);
- if (unlikely(err))
- event->destroy(event);
out:
return err;
}
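The last two hunks (and the perf_cpum_sf.c hunk further down) drop the explicit event->destroy() calls from the error paths. A sketch of the resulting pattern, assuming, per the reworked perf core error path, that the core itself invokes the destroy callback when event_init() fails:

static int example_event_init(struct perf_event *event)
{
	int err = -ENOENT;			/* default: not our event */

	if (event_is_ours(event))		/* hypothetical predicate */
		err = example_hw_init(event);	/* may set event->destroy */

	/*
	 * No event->destroy(event) on failure here: the perf core error
	 * path is assumed to run the callback itself.
	 */
	return err;
}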
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index e4a6bfc91080..690a293eb10d 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -237,7 +237,6 @@ CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
-
CPUMF_EVENT_ATTR(cf_z15, L1D_RO_EXCL_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z15, DTLB2_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z15, DTLB2_MISSES, 0x0082);
@@ -365,6 +364,83 @@ CPUMF_EVENT_ATTR(cf_z16, NNPA_WAIT_LOCK, 0x010d);
CPUMF_EVENT_ATTR(cf_z16, NNPA_HOLD_LOCK, 0x010e);
CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
+CPUMF_EVENT_ATTR(cf_z17, L1D_RO_EXCL_WRITES, 0x0080);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_MISSES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z17, CRSTE_1MB_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_GPAGE_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_z17, ITLB2_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z17, ITLB2_MISSES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_PTE_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_CRSTE_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_ENGINES_BUSY, 0x008b);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TEND, 0x008c);
+CPUMF_EVENT_ATTR(cf_z17, TX_NC_TEND, 0x008d);
+CPUMF_EVENT_ATTR(cf_z17, L1C_TLB2_MISSES, 0x008f);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ, 0x0091);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_IV, 0x0092);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_CHIP_HIT, 0x0093);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_DRAWER_HIT, 0x0094);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP, 0x0095);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_IV, 0x0096);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_CHIP_HIT, 0x0097);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_DRAWER_HIT, 0x0098);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_MODULE, 0x0099);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_DRAWER, 0x009a);
+CPUMF_EVENT_ATTR(cf_z17, DCW_OFF_DRAWER, 0x009b);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_MEMORY, 0x009c);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_MODULE_MEMORY, 0x009d);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_DRAWER_MEMORY, 0x009e);
+CPUMF_EVENT_ATTR(cf_z17, DCW_OFF_DRAWER_MEMORY, 0x009f);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_IV, 0x00a0);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_CHIP_HIT, 0x00a1);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_DRAWER_HIT, 0x00a2);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_IV, 0x00a3);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_CHIP_HIT, 0x00a4);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_DRAWER_HIT, 0x00a5);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_IV, 0x00a6);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_CHIP_HIT, 0x00a7);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_DRAWER_HIT, 0x00a8);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ, 0x00a9);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_IV, 0x00aa);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_CHIP_HIT, 0x00ab);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_DRAWER_HIT, 0x00ac);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP, 0x00ad);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_IV, 0x00ae);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_CHIP_HIT, 0x00af);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_DRAWER_HIT, 0x00b0);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_MODULE, 0x00b1);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_DRAWER, 0x00b2);
+CPUMF_EVENT_ATTR(cf_z17, ICW_OFF_DRAWER, 0x00b3);
+CPUMF_EVENT_ATTR(cf_z17, CYCLES_SAMETHRD, 0x00ca);
+CPUMF_EVENT_ATTR(cf_z17, CYCLES_DIFFTHRD, 0x00cb);
+CPUMF_EVENT_ATTR(cf_z17, INST_SAMETHRD, 0x00cc);
+CPUMF_EVENT_ATTR(cf_z17, INST_DIFFTHRD, 0x00cd);
+CPUMF_EVENT_ATTR(cf_z17, WRONG_BRANCH_PREDICTION, 0x00ce);
+CPUMF_EVENT_ATTR(cf_z17, VX_BCD_EXECUTION_SLOTS, 0x00e1);
+CPUMF_EVENT_ATTR(cf_z17, DECIMAL_INSTRUCTIONS, 0x00e2);
+CPUMF_EVENT_ATTR(cf_z17, LAST_HOST_TRANSLATIONS, 0x00e8);
+CPUMF_EVENT_ATTR(cf_z17, TX_NC_TABORT, 0x00f4);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TABORT_NO_SPECIAL, 0x00f5);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TABORT_SPECIAL, 0x00f6);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_ACCESS, 0x00f8);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CYCLES, 0x00fd);
+CPUMF_EVENT_ATTR(cf_z17, SORTL, 0x0100);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CC, 0x0109);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CCFINISH, 0x010a);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INVOCATIONS, 0x010b);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_COMPLETIONS, 0x010c);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_WAIT_LOCK, 0x010d);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_HOLD_LOCK, 0x010e);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_ONCHIP, 0x0110);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_OFFCHIP, 0x0111);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_DIFF, 0x0112);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_4K_PREFETCH, 0x0114);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_COMPL_LOCK, 0x0115);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_RETRY_LOCK, 0x0116);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_RETRY_LOCK_WITH_PLO, 0x0117);
+CPUMF_EVENT_ATTR(cf_z17, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
+CPUMF_EVENT_ATTR(cf_z17, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),
@@ -414,7 +490,7 @@ static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
NULL,
};
-static struct attribute *cpumcf_svn_67_pmu_event_attr[] __initdata = {
+static struct attribute *cpumcf_svn_678_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
@@ -779,6 +855,87 @@ static struct attribute *cpumcf_z16_pmu_event_attr[] __initdata = {
NULL,
};
+static struct attribute *cpumcf_z17_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_z17, L1D_RO_EXCL_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, DTLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, DTLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z17, CRSTE_1MB_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, DTLB2_GPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, ITLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, ITLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z17, TLB2_PTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, TLB2_CRSTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, TLB2_ENGINES_BUSY),
+ CPUMF_EVENT_PTR(cf_z17, TX_C_TEND),
+ CPUMF_EVENT_PTR(cf_z17, TX_NC_TEND),
+ CPUMF_EVENT_PTR(cf_z17, L1C_TLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ_IV),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_IV),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_MODULE),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, DCW_OFF_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_MODULE_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_DRAWER_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, DCW_OFF_DRAWER_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_IV),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_IV),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_IV),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ_IV),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_IV),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_MODULE),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, ICW_OFF_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, CYCLES_SAMETHRD),
+ CPUMF_EVENT_PTR(cf_z17, CYCLES_DIFFTHRD),
+ CPUMF_EVENT_PTR(cf_z17, INST_SAMETHRD),
+ CPUMF_EVENT_PTR(cf_z17, INST_DIFFTHRD),
+ CPUMF_EVENT_PTR(cf_z17, WRONG_BRANCH_PREDICTION),
+ CPUMF_EVENT_PTR(cf_z17, VX_BCD_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z17, DECIMAL_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_z17, LAST_HOST_TRANSLATIONS),
+ CPUMF_EVENT_PTR(cf_z17, TX_NC_TABORT),
+ CPUMF_EVENT_PTR(cf_z17, TX_C_TABORT_NO_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z17, TX_C_TABORT_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_ACCESS),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_CYCLES),
+ CPUMF_EVENT_PTR(cf_z17, SORTL),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_CC),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_CCFINISH),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INVOCATIONS),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_COMPLETIONS),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_WAIT_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_HOLD_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INST_ONCHIP),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INST_OFFCHIP),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INST_DIFF),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_4K_PREFETCH),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_COMPL_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_RETRY_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_RETRY_LOCK_WITH_PLO),
+ CPUMF_EVENT_PTR(cf_z17, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
+ CPUMF_EVENT_PTR(cf_z17, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
+ NULL,
+};
+
/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
static struct attribute_group cpumcf_pmu_events_group = {
@@ -859,7 +1016,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
if (ci.csvn >= 1 && ci.csvn <= 5)
csvn = cpumcf_svn_12345_pmu_event_attr;
else if (ci.csvn >= 6)
- csvn = cpumcf_svn_67_pmu_event_attr;
+ csvn = cpumcf_svn_678_pmu_event_attr;
/* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
@@ -892,6 +1049,10 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
case 0x3932:
model = cpumcf_z16_pmu_event_attr;
break;
+ case 0x9175:
+ case 0x9176:
+ model = cpumcf_z17_pmu_event_attr;
+ break;
default:
model = none;
break;
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 5f60248cb468..ad22799d8a7d 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -885,9 +885,6 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
event->attr.exclude_idle = 0;
err = __hw_perf_event_init(event);
- if (unlikely(err))
- if (event->destroy)
- event->destroy(event);
return err;
}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 54e281436a28..80b1f7a29f11 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -294,6 +294,10 @@ static int __init setup_elf_platform(void)
case 0x3932:
strcpy(elf_platform, "z16");
break;
+ case 0x9175:
+ case 0x9176:
+ strcpy(elf_platform, "z17");
+ break;
}
return 0;
}
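On s390, elf_platform is exported to user space through the AT_PLATFORM auxiliary vector, so the new machine types become visible to programs. A small userspace check (it prints "z17" only on the 0x9175/0x9176 machines with this patch applied):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_PLATFORM carries the elf_platform string set above. */
	printf("AT_PLATFORM: %s\n", (const char *)getauxval(AT_PLATFORM));
	return 0;
}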
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 610dd44a948b..a06a000f196c 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -95,7 +95,7 @@ static int handle_validity(struct kvm_vcpu *vcpu)
vcpu->stat.exit_validity++;
trace_kvm_s390_intercept_validity(vcpu, viwhy);
- KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
+ KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%p)", viwhy,
current->pid, vcpu->kvm);
/* do not warn on invalid runtime instrumentation mode */
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 2811a6c093b8..60c360c18690 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -3161,7 +3161,7 @@ void kvm_s390_gisa_clear(struct kvm *kvm)
if (!gi->origin)
return;
gisa_clear_ipm(gi->origin);
- VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
+ VM_EVENT(kvm, 3, "gisa 0x%p cleared", gi->origin);
}
void kvm_s390_gisa_init(struct kvm *kvm)
@@ -3177,7 +3177,7 @@ void kvm_s390_gisa_init(struct kvm *kvm)
hrtimer_setup(&gi->timer, gisa_vcpu_kicker, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
- VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
+ VM_EVENT(kvm, 3, "gisa 0x%p initialized", gi->origin);
}
void kvm_s390_gisa_enable(struct kvm *kvm)
@@ -3218,7 +3218,7 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
process_gib_alert_list();
hrtimer_cancel(&gi->timer);
gi->origin = NULL;
- VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
+ VM_EVENT(kvm, 3, "gisa 0x%p destroyed", gisa);
}
void kvm_s390_gisa_disable(struct kvm *kvm)
@@ -3467,7 +3467,7 @@ int __init kvm_s390_gib_init(u8 nisc)
}
}
- KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
+ KVM_EVENT(3, "gib 0x%p (nisc=%d) initialized", gib, gib->nisc);
goto out;
out_unreg_gal:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index fff863734975..3f3175193fd7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1022,7 +1022,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
- VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
+ VM_EVENT(kvm, 3, "New guest asce: 0x%p",
(void *) kvm->arch.gmap->asce);
break;
}
@@ -3466,7 +3466,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_s390_gisa_init(kvm);
INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
kvm->arch.pv.set_aside = NULL;
- KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
+ KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
return 0;
out_err:
@@ -3529,7 +3529,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm);
kvm_s390_vsie_destroy(kvm);
- KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
+ KVM_EVENT(3, "vm 0x%p destroyed", kvm);
}
/* Section: vcpu related */
@@ -3650,7 +3650,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
free_page((unsigned long)old_sca);
- VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
+ VM_EVENT(kvm, 2, "Switched to ESCA (0x%p -> 0x%p)",
old_sca, kvm->arch.sca);
return 0;
}
@@ -4027,7 +4027,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
goto out_free_sie_block;
}
- VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
+ VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p",
vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 9ac92dbf680d..9e28f165c114 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -56,7 +56,7 @@ TRACE_EVENT(kvm_s390_create_vcpu,
__entry->sie_block = sie_block;
),
- TP_printk("create cpu %d at 0x%pK, sie block at 0x%pK",
+ TP_printk("create cpu %d at 0x%p, sie block at 0x%p",
__entry->id, __entry->vcpu, __entry->sie_block)
);
@@ -255,7 +255,7 @@ TRACE_EVENT(kvm_s390_enable_css,
__entry->kvm = kvm;
),
- TP_printk("enabling channel I/O support (kvm @ %pK)\n",
+ TP_printk("enabling channel I/O support (kvm @ %p)\n",
__entry->kvm)
);
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 855f818deb98..d5c68ade71ab 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -54,6 +54,9 @@ static struct facility_def facility_defs[] = {
#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES
61, /* miscellaneous-instruction-extension 3 */
#endif
+#ifdef CONFIG_HAVE_MARCH_Z17_FEATURES
+ 84, /* miscellaneous-instruction-extension 4 */
+#endif
-1 /* END */
}
},
diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig
index 4464a2ad42ed..b6f36c938f1d 100644
--- a/arch/sh/configs/ap325rxa_defconfig
+++ b/arch/sh/configs/ap325rxa_defconfig
@@ -99,4 +99,3 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index ee1b36682155..e76694aace25 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -128,4 +128,3 @@ CONFIG_DEBUG_FS=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig
index 296ed768cbbb..ee3f6db7d8da 100644
--- a/arch/sh/configs/edosk7705_defconfig
+++ b/arch/sh/configs/edosk7705_defconfig
@@ -33,4 +33,3 @@ CONFIG_CMDLINE_FROM_BOOTLOADER=y
# CONFIG_PROC_FS is not set
# CONFIG_SYSFS is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRC32 is not set
diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig
index 67716a44463e..da176f100e00 100644
--- a/arch/sh/configs/espt_defconfig
+++ b/arch/sh/configs/espt_defconfig
@@ -110,4 +110,3 @@ CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig
index 77e3185f63e4..3582af15ad86 100644
--- a/arch/sh/configs/hp6xx_defconfig
+++ b/arch/sh/configs/hp6xx_defconfig
@@ -56,5 +56,3 @@ CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/kfr2r09-romimage_defconfig b/arch/sh/configs/kfr2r09-romimage_defconfig
index 42bf34181a3e..88fbb65cb9f9 100644
--- a/arch/sh/configs/kfr2r09-romimage_defconfig
+++ b/arch/sh/configs/kfr2r09-romimage_defconfig
@@ -49,4 +49,3 @@ CONFIG_TMPFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
-# CONFIG_CRC32 is not set
diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig
index d871623955c5..924bb3233b0b 100644
--- a/arch/sh/configs/landisk_defconfig
+++ b/arch/sh/configs/landisk_defconfig
@@ -111,4 +111,3 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig
index 6a234761bfd7..0307bb2be79f 100644
--- a/arch/sh/configs/lboxre2_defconfig
+++ b/arch/sh/configs/lboxre2_defconfig
@@ -58,4 +58,3 @@ CONFIG_ROMFS_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig
index 8d443749550e..93b9aa32dc7c 100644
--- a/arch/sh/configs/magicpanelr2_defconfig
+++ b/arch/sh/configs/magicpanelr2_defconfig
@@ -86,5 +86,3 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_KOBJECT=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_FRAME_POINTER=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC16=m
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index 2d1e65cad239..fc2010c241fb 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -90,4 +90,3 @@ CONFIG_DEBUG_FS=y
CONFIG_CRYPTO_MANAGER=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index 6bd6c0ae85d7..f28b8c4181c2 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -105,4 +105,3 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
index cde668569cc1..3a4239f20ff1 100644
--- a/arch/sh/configs/r7785rp_defconfig
+++ b/arch/sh/configs/r7785rp_defconfig
@@ -103,4 +103,3 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index c863a11c7592..69568cc40396 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -87,4 +87,3 @@ CONFIG_MINIX_FS=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_DEBUG_FS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index 7e4f710d46c7..ecb4bdb5bb58 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -92,4 +92,3 @@ CONFIG_MINIX_FS=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_DEBUG_FS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig
index cd24cf08210e..9870d16d9711 100644
--- a/arch/sh/configs/sdk7780_defconfig
+++ b/arch/sh/configs/sdk7780_defconfig
@@ -136,4 +136,3 @@ CONFIG_SH_STANDARD_BIOS=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index 472fdf365cad..64f9308ee586 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -101,6 +101,3 @@ CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_CRC_ITU_T=y
diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
index 49a4961889de..8770a72e6a63 100644
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@ -96,4 +96,3 @@ CONFIG_FRAME_POINTER=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_CCITT=y
diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
index de293792db84..b15c6406a0e8 100644
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@ -122,4 +122,3 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_FRAME_POINTER=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_CCITT=y
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig
index 96521271758c..9501e69eb886 100644
--- a/arch/sh/configs/se7724_defconfig
+++ b/arch/sh/configs/se7724_defconfig
@@ -128,4 +128,3 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 48f38ec236b6..4d75c92cac10 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -120,6 +120,5 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_DEFLATE=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_CCITT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=y
diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig
index 1b1174a07e36..cc6292b3235a 100644
--- a/arch/sh/configs/sh2007_defconfig
+++ b/arch/sh/configs/sh2007_defconfig
@@ -193,5 +193,3 @@ CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig
index 5440bd0ca4ed..e6298f22623a 100644
--- a/arch/sh/configs/sh7724_generic_defconfig
+++ b/arch/sh/configs/sh7724_generic_defconfig
@@ -39,4 +39,3 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_SYSFS is not set
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRC32 is not set
diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig
index 57923c3296cc..b77b3313157e 100644
--- a/arch/sh/configs/sh7763rdp_defconfig
+++ b/arch/sh/configs/sh7763rdp_defconfig
@@ -112,4 +112,3 @@ CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_T10DIF=y
diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig
index 4338af8d02d0..2e2b46980b58 100644
--- a/arch/sh/configs/sh7770_generic_defconfig
+++ b/arch/sh/configs/sh7770_generic_defconfig
@@ -41,4 +41,3 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_SYSFS is not set
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRC32 is not set
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 8e85f205d8f5..f022ada363b5 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -264,4 +264,3 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=m
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 01b2bdfbf9a8..f1ba0fefe1f9 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -229,7 +229,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=m
CONFIG_VCC=m
CONFIG_PATA_CMD64X=y
CONFIG_IP_PNP=y
diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
index d3caa31240ed..175958b02f2b 100644
--- a/arch/x86/entry/entry.S
+++ b/arch/x86/entry/entry.S
@@ -17,19 +17,20 @@
.pushsection .noinstr.text, "ax"
-SYM_FUNC_START(entry_ibpb)
+/* Clobbers AX, CX, DX */
+SYM_FUNC_START(write_ibpb)
ANNOTATE_NOENDBR
movl $MSR_IA32_PRED_CMD, %ecx
- movl $PRED_CMD_IBPB, %eax
+ movl _ASM_RIP(x86_pred_cmd), %eax
xorl %edx, %edx
wrmsr
	/* Make sure IBPB clears return stack predictions too. */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
RET
-SYM_FUNC_END(entry_ibpb)
+SYM_FUNC_END(write_ibpb)
/* For KVM */
-EXPORT_SYMBOL_GPL(entry_ibpb);
+EXPORT_SYMBOL_GPL(write_ibpb);
.popsection
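For reference, the MSR write performed by write_ibpb() corresponds roughly to the C sketch below; note that the renamed function now loads the command value from the x86_pred_cmd variable rather than the PRED_CMD_IBPB immediate. The constants follow the architectural definitions; the helper name is illustrative:

#define MSR_IA32_PRED_CMD	0x00000049
#define PRED_CMD_IBPB		(1u << 0)

static inline void ibpb_sketch(unsigned int pred_cmd)
{
	/* wrmsr: ECX selects the MSR, EDX:EAX carries the 64-bit value. */
	asm volatile("wrmsr"
		     :: "c" (MSR_IA32_PRED_CMD), "a" (pred_cmd), "d" (0)
		     : "memory");
	/*
	 * write_ibpb() additionally refills the RSB for parts where IBPB
	 * does not flush return-stack predictions (X86_BUG_IBPB_NO_RET).
	 */
}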
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a884ab544335..3bdae454a959 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1472,8 +1472,13 @@ struct kvm_arch {
struct once nx_once;
#ifdef CONFIG_X86_64
- /* The number of TDP MMU pages across all roots. */
+#ifdef CONFIG_KVM_PROVE_MMU
+ /*
+ * The number of TDP MMU pages across all roots. Used only to sanity
+ * check that KVM isn't leaking TDP MMU pages.
+ */
atomic64_t tdp_mmu_pages;
+#endif
/*
* List of struct kvm_mmu_pages being used as roots.
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 8a5cc8e70439..5c43f145454d 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -269,7 +269,7 @@
* typically has NO_MELTDOWN).
*
* While retbleed_untrain_ret() doesn't clobber anything but requires stack,
- * entry_ibpb() will clobber AX, CX, DX.
+ * write_ibpb() will clobber AX, CX, DX.
*
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
* where we have a stack but before any RET instruction.
@@ -279,7 +279,7 @@
VALIDATE_UNRET_END
CALL_UNTRAIN_RET
ALTERNATIVE_2 "", \
- "call entry_ibpb", \ibpb_feature, \
+ "call write_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm
@@ -368,7 +368,7 @@ extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
extern void entry_untrain_ret(void);
-extern void entry_ibpb(void);
+extern void write_ibpb(void);
#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void);
@@ -514,11 +514,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
: "memory");
}
-extern u64 x86_pred_cmd;
-
static inline void indirect_branch_prediction_barrier(void)
{
- alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_IBPB);
+ asm_inline volatile(ALTERNATIVE("", "call write_ibpb", X86_FEATURE_IBPB)
+ : ASM_CALL_CONSTRAINT
+ :: "rax", "rcx", "rdx", "memory");
}
/* The Intel SPEC CTRL MSR base value cache */
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 55a5e656e4b9..4f84d421d1cf 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -16,23 +16,23 @@
#ifdef __ASSEMBLER__
#define ASM_CLAC \
- ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "clac", X86_FEATURE_SMAP
+ ALTERNATIVE "", "clac", X86_FEATURE_SMAP
#define ASM_STAC \
- ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "stac", X86_FEATURE_SMAP
+ ALTERNATIVE "", "stac", X86_FEATURE_SMAP
#else /* __ASSEMBLER__ */
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP);
+ alternative("", "clac", X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP);
+ alternative("", "stac", X86_FEATURE_SMAP);
}
static __always_inline unsigned long smap_save(void)
@@ -59,9 +59,9 @@ static __always_inline void smap_restore(unsigned long flags)
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
#define ASM_CLAC_UNSAFE \
ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "clac", X86_FEATURE_SMAP)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index dae6a73be40e..9fa321a95eb3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -23,6 +23,8 @@
#include <linux/serial_core.h>
#include <linux/pgtable.h>
+#include <xen/xen.h>
+
#include <asm/e820/api.h>
#include <asm/irqdomain.h>
#include <asm/pci_x86.h>
@@ -1729,6 +1731,15 @@ int __init acpi_mps_check(void)
{
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
	/* mptable code is not built-in */
+
+ /*
+ * Xen disables ACPI in PV DomU guests but it still emulates APIC and
+ * supports SMP. Returning early here ensures that APIC is not disabled
+ * unnecessarily and the guest is not limited to a single vCPU.
+ */
+ if (xen_pv_domain() && !xen_initial_domain())
+ return 0;
+
if (acpi_disabled || acpi_noirq) {
pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
return 1;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 79569f72b8ee..a839ff506f45 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -805,6 +805,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
static const struct x86_cpu_id erratum_1386_microcode[] = {
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x01), 0x2, 0x2, 0x0800126e),
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x31), 0x0, 0x0, 0x08301052),
+ {}
};
static void fix_erratum_1386(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4386aa6c69e1..362602b705cc 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -59,7 +59,6 @@ DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
-EXPORT_SYMBOL_GPL(x86_pred_cmd);
static u64 __ro_after_init x86_arch_cap_msr;
@@ -1142,7 +1141,7 @@ do_cmd_auto:
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -1592,51 +1591,54 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
rrsba_disabled = true;
}
-static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
{
/*
- * Similar to context switches, there are two types of RSB attacks
- * after VM exit:
+ * WARNING! There are many subtleties to consider when changing *any*
+ * code related to RSB-related mitigations. Before doing so, carefully
+ * read the following document, and update if necessary:
*
- * 1) RSB underflow
+ * Documentation/admin-guide/hw-vuln/rsb.rst
*
- * 2) Poisoned RSB entry
+ * In an overly simplified nutshell:
*
- * When retpoline is enabled, both are mitigated by filling/clearing
- * the RSB.
+ * - User->user RSB attacks are conditionally mitigated during
+ * context switches by cond_mitigation -> write_ibpb().
*
- * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
- * prediction isolation protections, RSB still needs to be cleared
- * because of #2. Note that SMEP provides no protection here, unlike
- * user-space-poisoned RSB entries.
+ * - User->kernel and guest->host attacks are mitigated by eIBRS or
+ * RSB filling.
*
- * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
- * bug is present then a LITE version of RSB protection is required,
- * just a single call needs to retire before a RET is executed.
+ * Though, depending on config, note that other alternative
+ * mitigations may end up getting used instead, e.g., IBPB on
+ * entry/vmexit, call depth tracking, or return thunks.
*/
+
switch (mode) {
case SPECTRE_V2_NONE:
- return;
+ break;
- case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS:
+ case SPECTRE_V2_EIBRS_LFENCE:
+ case SPECTRE_V2_EIBRS_RETPOLINE:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
- setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
}
- return;
+ break;
- case SPECTRE_V2_EIBRS_RETPOLINE:
case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_IBRS:
+ pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
- pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
- return;
- }
+ break;
- pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
- dump_stack();
+ default:
+ pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
+ dump_stack();
+ break;
+ }
}
/*
@@ -1830,48 +1832,7 @@ static void __init spectre_v2_select_mitigation(void)
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
- /*
- * If Spectre v2 protection has been enabled, fill the RSB during a
- * context switch. In general there are two types of RSB attacks
- * across context switches, for which the CALLs/RETs may be unbalanced.
- *
- * 1) RSB underflow
- *
- * Some Intel parts have "bottomless RSB". When the RSB is empty,
- * speculated return targets may come from the branch predictor,
- * which could have a user-poisoned BTB or BHB entry.
- *
- * AMD has it even worse: *all* returns are speculated from the BTB,
- * regardless of the state of the RSB.
- *
- * When IBRS or eIBRS is enabled, the "user -> kernel" attack
- * scenario is mitigated by the IBRS branch prediction isolation
- * properties, so the RSB buffer filling wouldn't be necessary to
- * protect against this type of attack.
- *
- * The "user -> user" attack scenario is mitigated by RSB filling.
- *
- * 2) Poisoned RSB entry
- *
- * If the 'next' in-kernel return stack is shorter than 'prev',
- * 'next' could be tricked into speculating with a user-poisoned RSB
- * entry.
- *
- * The "user -> kernel" attack scenario is mitigated by SMEP and
- * eIBRS.
- *
- * The "user -> user" scenario, also known as SpectreBHB, requires
- * RSB clearing.
- *
- * So to mitigate all cases, unconditionally fill RSB on context
- * switches.
- *
- * FIXME: Is this pointless for retbleed-affected AMD?
- */
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
-
- spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+ spectre_v2_select_rsb_mitigation(mode);
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -2676,7 +2637,7 @@ static void __init srso_select_mitigation(void)
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -2701,7 +2662,7 @@ ibpb_on_vmexit:
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 93ec829015f1..cc4a54145c83 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -3553,6 +3553,22 @@ static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
free_rmid(rgrp->closid, rgrp->mon.rmid);
}
+/*
+ * We allow creating mon groups only within a directory called "mon_groups"
+ * which is present in every ctrl_mon group. Check if this is a valid
+ * "mon_groups" directory.
+ *
+ * 1. The directory should be named "mon_groups".
+ * 2. The mon group itself should "not" be named "mon_groups".
+ * This makes sure "mon_groups" directory always has a ctrl_mon group
+ * as parent.
+ */
+static bool is_mon_groups(struct kernfs_node *kn, const char *name)
+{
+ return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
+ strcmp(name, "mon_groups"));
+}
+
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
const char *name, umode_t mode,
enum rdt_group_type rtype, struct rdtgroup **r)
@@ -3568,6 +3584,15 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
goto out_unlock;
}
+ /*
+ * Check that the parent directory for a monitor group is a "mon_groups"
+ * directory.
+ */
+ if (rtype == RDTMON_GROUP && !is_mon_groups(parent_kn, name)) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
if (rtype == RDTMON_GROUP &&
(prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
@@ -3751,22 +3776,6 @@ out_unlock:
return ret;
}
-/*
- * We allow creating mon groups only with in a directory called "mon_groups"
- * which is present in every ctrl_mon group. Check if this is a valid
- * "mon_groups" directory.
- *
- * 1. The directory should be named "mon_groups".
- * 2. The mon group itself should "not" be named "mon_groups".
- * This makes sure "mon_groups" directory always has a ctrl_mon group
- * as parent.
- */
-static bool is_mon_groups(struct kernfs_node *kn, const char *name)
-{
- return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
- strcmp(name, "mon_groups"));
-}
-
static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
umode_t mode)
{
@@ -3782,11 +3791,8 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn)
return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
- /*
- * If RDT monitoring is supported and the parent directory is a valid
- * "mon_groups" directory, add a monitoring subdirectory.
- */
- if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name))
+ /* Else, attempt to add a monitoring subdirectory. */
+ if (resctrl_arch_mon_capable())
return rdtgroup_mkdir_mon(parent_kn, name, mode);
return -EPERM;
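Moving is_mon_groups() above mkdir_rdt_prepare() lets the parent-directory check happen during preparation instead of in rdtgroup_mkdir(), so an invalid parent now fails with -EPERM before any group state is set up. A userspace analogue of the check, for illustration only:

#include <stdbool.h>
#include <string.h>

/* Mirrors the kernel predicate on plain strings instead of kernfs nodes. */
static bool is_mon_groups_like(const char *parent_name, const char *name)
{
	return strcmp(parent_name, "mon_groups") == 0 &&
	       strcmp(name, "mon_groups") != 0;
}

/*
 * is_mon_groups_like("mon_groups", "g1")         -> true  (allowed)
 * is_mon_groups_like("mon_groups", "mon_groups") -> false (no nesting)
 * is_mon_groups_like("grp0",       "g1")         -> false (wrong parent)
 */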
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 57120f0749cc..9d8dd8deb2a7 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -753,22 +753,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
void __init e820__register_nosave_regions(unsigned long limit_pfn)
{
int i;
- unsigned long pfn = 0;
+ u64 last_addr = 0;
for (i = 0; i < e820_table->nr_entries; i++) {
struct e820_entry *entry = &e820_table->entries[i];
- if (pfn < PFN_UP(entry->addr))
- register_nosave_region(pfn, PFN_UP(entry->addr));
-
- pfn = PFN_DOWN(entry->addr + entry->size);
-
if (entry->type != E820_TYPE_RAM)
- register_nosave_region(PFN_UP(entry->addr), pfn);
+ continue;
- if (pfn >= limit_pfn)
- break;
+ if (last_addr < entry->addr)
+ register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));
+
+ last_addr = entry->addr + entry->size;
}
+
+ register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
}
#ifdef CONFIG_ACPI
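The rewritten loop keys off RAM entries only: it registers a nosave region over any hole (or non-RAM entry) preceding each RAM entry and finally marks everything from the end of the last RAM entry up to the limit. A standalone simulation of that flow (the memory map and limit are made up for illustration; the kernel works in page frames via PFN_UP/PFN_DOWN, elided here):

#include <stdio.h>
#include <stdint.h>

struct entry { uint64_t addr, size; int is_ram; };

int main(void)
{
	struct entry map[] = {			/* hypothetical e820 layout */
		{ 0x00000000,  0x9f000, 1 },	/* RAM */
		{ 0x0009f000,  0x61000, 0 },	/* reserved hole */
		{ 0x00100000, 0xf00000, 1 },	/* RAM */
	};
	uint64_t last_addr = 0, limit = 0x2000000;

	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (!map[i].is_ram)
			continue;	/* non-RAM is covered as a gap */
		if (last_addr < map[i].addr)
			printf("nosave [%#jx, %#jx)\n",
			       (uintmax_t)last_addr, (uintmax_t)map[i].addr);
		last_addr = map[i].addr + map[i].size;
	}
	printf("nosave [%#jx, %#jx)\n", (uintmax_t)last_addr, (uintmax_t)limit);
	return 0;
}

Running it prints nosave [0x9f000, 0x100000) and nosave [0x1000000, 0x2000000), i.e. exactly the non-RAM ranges.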
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 611f27e3890c..3aad78bfcb26 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -389,10 +389,10 @@ static int __init setup_early_printk(char *buf)
keep = (strstr(buf, "keep") != NULL);
while (*buf != '\0') {
- if (!strncmp(buf, "mmio", 4)) {
- early_mmio_serial_init(buf + 4);
+ if (!strncmp(buf, "mmio32", 6)) {
+ buf += 6;
+ early_mmio_serial_init(buf);
early_console_register(&early_serial_console, keep);
- buf += 4;
}
if (!strncmp(buf, "serial", 6)) {
buf += 6;
@@ -407,9 +407,9 @@ static int __init setup_early_printk(char *buf)
}
#ifdef CONFIG_PCI
if (!strncmp(buf, "pciserial", 9)) {
- early_pci_serial_init(buf + 9);
+		buf += 9; /* Keep from matching the above "pciserial" */
+ early_pci_serial_init(buf);
early_console_register(&early_serial_console, keep);
- buf += 9; /* Keep from match the above "serial" */
}
#endif
if (!strncmp(buf, "vga", 3) &&
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 5e4d4934c0d3..571c906ffcbf 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -1427,8 +1427,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
}
break;
case 0xa: { /* Architectural Performance Monitoring */
- union cpuid10_eax eax;
- union cpuid10_edx edx;
+ union cpuid10_eax eax = { };
+ union cpuid10_edx edx = { };
if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
@@ -1444,8 +1444,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
if (kvm_pmu_cap.version)
edx.split.anythread_deprecated = 1;
- edx.split.reserved1 = 0;
- edx.split.reserved2 = 0;
entry->eax = eax.full;
entry->ebx = kvm_pmu_cap.events_mask;
@@ -1763,7 +1761,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
/* AMD Extended Performance Monitoring and Debug */
case 0x80000022: {
- union cpuid_0x80000022_ebx ebx;
+ union cpuid_0x80000022_ebx ebx = { };
entry->ecx = entry->edx = 0;
if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
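
Replacing the explicit reserved-bit clearing with an empty initializer zeroes the whole union up front, so every field not assigned later is guaranteed zero, including on the early-out paths. A small sketch; the bitfield layout is loosely modeled on cpuid10_eax and the names are illustrative:

#include <stdio.h>
#include <stdint.h>

union eax_layout {
	struct {
		uint32_t version:8;
		uint32_t num_counters:8;
		uint32_t bit_width:8;
		uint32_t mask_length:8;
	} split;
	uint32_t full;
};

int main(void)
{
	union eax_layout eax = { };	/* GNU/C23 empty initializer, as the kernel uses */

	eax.split.version = 2;
	eax.split.num_counters = 8;
	/* Unset fields are guaranteed zero; no explicit
	 * "reserved = 0" assignments needed. */
	printf("eax.full = %#x\n", eax.full);
	return 0;
}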
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7cc0564f5f97..21a3b8166242 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -40,7 +40,9 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
kvm_tdp_mmu_invalidate_roots(kvm, KVM_VALID_ROOTS);
kvm_tdp_mmu_zap_invalidated_roots(kvm, false);
- WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+#ifdef CONFIG_KVM_PROVE_MMU
+ KVM_MMU_WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+#endif
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
/*
@@ -325,13 +327,17 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
kvm_account_pgtable_pages((void *)sp->spt, +1);
+#ifdef CONFIG_KVM_PROVE_MMU
atomic64_inc(&kvm->arch.tdp_mmu_pages);
+#endif
}
static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
kvm_account_pgtable_pages((void *)sp->spt, -1);
+#ifdef CONFIG_KVM_PROVE_MMU
atomic64_dec(&kvm->arch.tdp_mmu_pages);
+#endif
}
/**
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index ec08fa3caf43..51116fe69a50 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -31,6 +31,8 @@ static DEFINE_PER_CPU(struct list_head, wakeup_vcpus_on_cpu);
*/
static DEFINE_PER_CPU(raw_spinlock_t, wakeup_vcpus_on_cpu_lock);
+#define PI_LOCK_SCHED_OUT SINGLE_DEPTH_NESTING
+
static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
return &(to_vmx(vcpu)->pi_desc);
@@ -89,9 +91,20 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
* current pCPU if the task was migrated.
*/
if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR) {
- raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+ raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu);
+
+ /*
+ * In addition to taking the wakeup lock for the regular/IRQ
+ * context, tell lockdep it is being taken for the "sched out"
+ * context as well. vCPU loads happen in task context, and
+ * this is taking the lock of the *previous* CPU, i.e. can race
+ * with both the scheduler and the wakeup handler.
+ */
+ raw_spin_lock(spinlock);
+ spin_acquire(&spinlock->dep_map, PI_LOCK_SCHED_OUT, 0, _RET_IP_);
list_del(&vmx->pi_wakeup_list);
- raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+ spin_release(&spinlock->dep_map, _RET_IP_);
+ raw_spin_unlock(spinlock);
}
dest = cpu_physical_id(cpu);
@@ -148,11 +161,23 @@ static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct pi_desc old, new;
- unsigned long flags;
- local_irq_save(flags);
+ lockdep_assert_irqs_disabled();
- raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+ /*
+ * Acquire the wakeup lock using the "sched out" context to work around
+ * a lockdep false positive. When this is called, schedule() holds
+ * various per-CPU scheduler locks. When the wakeup handler runs, it
+ * holds this CPU's wakeup lock while calling try_to_wake_up(), which
+ * can eventually take the aforementioned scheduler locks, which causes
+ * lockdep to assume there is a deadlock.
+ *
+ * Deadlock can't actually occur because IRQs are disabled for the
+ * entirety of the sched_out critical section, i.e. the wakeup handler
+ * can't run while the scheduler locks are held.
+ */
+ raw_spin_lock_nested(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu),
+ PI_LOCK_SCHED_OUT);
list_add_tail(&vmx->pi_wakeup_list,
&per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
@@ -176,8 +201,6 @@ static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
*/
if (pi_test_on(&new))
__apic_send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
-
- local_irq_restore(flags);
}
static bool vmx_needs_pi_wakeup(struct kvm_vcpu *vcpu)
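
raw_spin_lock_nested() takes the same lock class under a distinct lockdep subclass, which is how the patch teaches lockdep that the wakeup lock is legitimately taken from both the regular/IRQ context and the sched-out context. A compile-only sketch of the call shape, with stubs standing in for the kernel lock primitives:

#include <stdio.h>

/* Stubbed stand-ins so the call shape compiles outside the kernel. */
#define SINGLE_DEPTH_NESTING 1
#define PI_LOCK_SCHED_OUT SINGLE_DEPTH_NESTING

typedef struct { int dummy; } raw_spinlock_t;

static void raw_spin_lock(raw_spinlock_t *l)   { (void)l; }
static void raw_spin_unlock(raw_spinlock_t *l) { (void)l; }
static void raw_spin_lock_nested(raw_spinlock_t *l, int subclass)
{
	(void)l;
	printf("lock taken with subclass %d\n", subclass);
}

static raw_spinlock_t wakeup_lock;

int main(void)
{
	/* Regular/IRQ context: plain acquire. */
	raw_spin_lock(&wakeup_lock);
	raw_spin_unlock(&wakeup_lock);

	/* sched-out context: same lock class, distinct lockdep subclass,
	 * so the wakeup-handler vs. scheduler-lock chain is not reported
	 * as a (false) deadlock. */
	raw_spin_lock_nested(&wakeup_lock, PI_LOCK_SCHED_OUT);
	raw_spin_unlock(&wakeup_lock);
	return 0;
}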
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c841817a914a..3712dde0bf9d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11786,6 +11786,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);
+ kvm_vcpu_srcu_read_lock(vcpu);
+
r = kvm_apic_accept_events(vcpu);
if (r < 0)
goto out;
@@ -11799,6 +11801,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
mp_state->mp_state = vcpu->arch.mp_state;
out:
+ kvm_vcpu_srcu_read_unlock(vcpu);
+
if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index e459d97ef397..eb83348f9305 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -667,9 +667,9 @@ static void cond_mitigation(struct task_struct *next)
prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
/*
- * Avoid user/user BTB poisoning by flushing the branch predictor
- * when switching between processes. This stops one process from
- * doing Spectre-v2 attacks on another.
+ * Avoid user->user BTB/RSB poisoning by flushing them when switching
+ * between processes. This stops one process from doing Spectre-v2
+ * attacks on another.
*
* Both, the conditional and the always IBPB mode use the mm
* pointer to avoid the IBPB when switching between tasks of the
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 8c534c36adfa..66f066b8feda 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -26,7 +26,7 @@
/* code below belongs to the image kernel */
.align PAGE_SIZE
SYM_FUNC_START(restore_registers)
- ANNOTATE_NOENDBR
+ ENDBR
/* go back to the original page tables */
movq %r9, %cr3
@@ -120,7 +120,7 @@ SYM_FUNC_END(restore_image)
/* code below has been relocated to a safe page */
SYM_FUNC_START(core_restore_code)
- ANNOTATE_NOENDBR
+ ENDBR
/* switch to temporary page tables */
movq %rax, %cr3
/* flush TLB */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 43dcd8c7badc..1b7710bd0d05 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -70,6 +70,9 @@ EXPORT_SYMBOL(xen_start_flags);
*/
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
+/* Number of pages released from the initial allocation. */
+unsigned long xen_released_pages;
+
static __ref void xen_get_vendor(void)
{
init_cpu_devs();
@@ -466,6 +469,13 @@ int __init arch_xen_unpopulated_init(struct resource **res)
xen_free_unpopulated_pages(1, &pg);
}
+ /*
+ * Account for the region being in the physmap but unpopulated.
+ * The value in xen_released_pages is used by the balloon
+ * driver to know how much of the physmap is unpopulated and
+ * set an accurate initial memory target.
+ */
+ xen_released_pages += xen_extra_mem[i].n_pfns;
/* Zero so region is not also added to the balloon driver. */
xen_extra_mem[i].n_pfns = 0;
}
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 0e3d930bcb89..9d25d9373945 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
+#include <linux/cpufreq.h>
+#include <linux/cpuidle.h>
#include <linux/export.h>
#include <linux/mm.h>
@@ -123,8 +125,23 @@ static void __init pvh_arch_setup(void)
{
pvh_reserve_extra_memory();
- if (xen_initial_domain())
+ if (xen_initial_domain()) {
xen_add_preferred_consoles();
+
+ /*
+ * Disable usage of CPU idle and frequency drivers: when
+ * running as hardware domain the exposed native ACPI tables
+ * cause idle and/or frequency drivers to attach and
+ * malfunction. Xen is the entity that controls the idle and
+ * frequency states.
+ *
+ * For unprivileged domains the exposed ACPI tables are
+ * fabricated and don't contain such data.
+ */
+ disable_cpuidle();
+ disable_cpufreq();
+ WARN_ON(xen_set_default_idle());
+ }
}
void __init xen_pvh_init(struct boot_params *boot_params)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c3db71d96c43..3823e52aef52 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -37,9 +37,6 @@
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
-/* Number of pages released from the initial allocation. */
-unsigned long xen_released_pages;
-
/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 109af12f7647..461bb1526502 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -226,9 +226,7 @@ SYM_CODE_END(xen_early_idt_handler_array)
push %rax
mov $__HYPERVISOR_iret, %eax
syscall /* Do the IRET. */
-#ifdef CONFIG_MITIGATION_SLS
- int3
-#endif
+ ud2 /* The SYSCALL should never return. */
.endm
SYM_CODE_START(xen_iret)
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 0825851656a2..f0dad0c9ce33 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -332,7 +332,7 @@ ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t si
return -EINVAL;
ret = ivpu_rpm_get(vdev);
- if (ret)
+ if (ret < 0)
return ret;
ivpu_pm_trigger_recovery(vdev, "debugfs");
@@ -383,7 +383,7 @@ static int dct_active_set(void *data, u64 active_percent)
return -EINVAL;
ret = ivpu_rpm_get(vdev);
- if (ret)
+ if (ret < 0)
return ret;
if (active_percent)
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 0e096fd9b95d..39f83225c181 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -302,7 +302,8 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
struct ivpu_ipc_consumer cons;
int ret;
- drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
+ drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) &&
+ pm_runtime_enabled(vdev->drm.dev));
ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c
index ffe7b10f8a76..2a043baf10ca 100644
--- a/drivers/accel/ivpu/ivpu_ms.c
+++ b/drivers/accel/ivpu/ivpu_ms.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_file.h>
+#include <linux/pm_runtime.h>
#include "ivpu_drv.h"
#include "ivpu_gem.h"
@@ -44,6 +45,10 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
@@ -96,6 +101,8 @@ err_free_ms:
kfree(ms);
unlock:
mutex_unlock(&file_priv->ms_lock);
+
+ ivpu_rpm_put(vdev);
return ret;
}
@@ -160,6 +167,10 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
if (!args->metric_group_mask)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@@ -187,6 +198,7 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
unlock:
mutex_unlock(&file_priv->ms_lock);
+ ivpu_rpm_put(vdev);
return ret;
}
@@ -204,11 +216,17 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_metric_streamer_stop *args = data;
+ struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_ms_instance *ms;
+ int ret;
if (!args->metric_group_mask)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@@ -217,6 +235,7 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
mutex_unlock(&file_priv->ms_lock);
+ ivpu_rpm_put(vdev);
return ms ? 0 : -EINVAL;
}
@@ -281,6 +300,9 @@ unlock:
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
struct ivpu_ms_instance *ms, *tmp;
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&file_priv->ms_lock);
@@ -293,6 +315,8 @@ void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
free_instance(file_priv, ms);
mutex_unlock(&file_priv->ms_lock);
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
}
void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
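
Each metric-streamer ioctl now takes a runtime-PM reference before touching the device and drops it on every exit path. A sketch of that bracket pattern with stubbed helpers (the real ivpu_rpm_get()/ivpu_rpm_put() wrap the PM core):

#include <stdio.h>

/* Stub runtime-PM helpers; the real ones live in the driver/PM core. */
static int refcount;
static int rpm_get(void)  { refcount++; return 0; /* < 0 on error */ }
static void rpm_put(void) { refcount--; }

static int some_ioctl(void)
{
	int ret = rpm_get();

	if (ret < 0)
		return ret;	/* device stays suspended on failure */

	/* ... do work that requires the device to be awake ... */
	printf("device awake, refcount=%d\n", refcount);
	ret = 0;

	rpm_put();		/* balanced on every exit path */
	return ret;
}

int main(void) { return some_ioctl(); }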
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 90b09840536d..0a7026040188 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -458,7 +458,7 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
acpi_pm_wakeup_event(&device->dev);
button = acpi_driver_data(device);
- if (button->suspended)
+ if (button->suspended || event == ACPI_BUTTON_NOTIFY_WAKE)
return;
input = button->input;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 8db09d81918f..3c5f34892734 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
+ /*
+ * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3929
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ }
+ },
{ },
};
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index a35dd0e41c27..f73ce6e13065 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -229,7 +229,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
while ((unsigned long)entry + proc_sz < table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
@@ -270,7 +270,7 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
while ((unsigned long)entry + proc_sz < table_end) {
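
The one-character bug here is a classic: sizeof(struct acpi_pptt_processor *) measures the pointer, not the structure, so the bounds check used 8 bytes (on 64-bit) instead of the real entry size and could walk past the table end. A tiny demonstration of the difference (the structure size is illustrative):

#include <stdio.h>

struct acpi_pptt_processor_like {
	unsigned char data[20];	/* illustrative size only */
};

int main(void)
{
	/* The stray '*' measures the pointer, not the structure. */
	printf("sizeof(struct)   = %zu\n",
	       sizeof(struct acpi_pptt_processor_like));
	printf("sizeof(struct *) = %zu\n",
	       sizeof(struct acpi_pptt_processor_like *));
	return 0;
}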
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 434f380114af..03dbaf4a13a7 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev)
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
+ if (!ap->ioaddr.cmd_addr)
+ return -ENOMEM;
ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
resource_size(ctl_res));
+ if (!ap->ioaddr.ctl_addr)
+ return -ENOMEM;
ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
resource_size(dma_res));
+ if (!ap->ioaddr.bmdma_addr)
+ return -ENOMEM;
/*
* Adjust register offsets
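
devm_ioremap() returns NULL on failure rather than an ERR_PTR, so each mapping needs its own NULL check before use. A sketch of the pattern with a stub mapper (names and error codes are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Stub: like devm_ioremap(), returns NULL on failure, not an ERR_PTR,
 * so IS_ERR()-style checks would miss the error. */
static void *fake_ioremap(int fail) { return fail ? NULL : malloc(16); }

int main(void)
{
	void *cmd = fake_ioremap(0);

	if (!cmd)
		return 1;	/* -ENOMEM in the driver */

	void *ctl = fake_ioremap(1);

	if (!ctl) {
		printf("ctl mapping failed, bailing out\n");
		free(cmd);
		return 1;	/* each mapping checked individually */
	}
	free(ctl);
	free(cmd);
	return 0;
}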
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index a482741eb181..c3042eca6332 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
mmio += PDC_CHIP0_OFS;
for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- pdc_i2c_read_data[i].reg,
- &spd0[pdc_i2c_read_data[i].ofs]);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ pdc_i2c_read_data[i].reg,
+ &spd0[pdc_i2c_read_data[i].ofs])) {
+ dev_err(host->dev,
+ "Failed in i2c read at index %d: device=%#x, reg=%#x\n",
+ i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg);
+ return -EIO;
+ }
data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
@@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
+ if (size < 0)
+ return size;
dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
/* Programming DIMM Module Global Control Register (index_CID0:88h) */
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a97f2c40c640..2551ebf88dda 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -367,7 +367,7 @@ config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
select CEPH_LIB
- select LIBCRC32C
+ select CRC32
select CRYPTO_AES
select CRYPTO
help
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index 6fb4e38fca88..495a72da04c6 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -10,7 +10,7 @@ config BLK_DEV_DRBD
tristate "DRBD Distributed Replicated Block Device support"
depends on PROC_FS && INET
select LRU_CACHE
- select LIBCRC32C
+ select CRC32
help
NOTE: In order to authenticate connections you have to select
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 3bb9cee0a9b5..aa163ae9b2aa 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -2031,7 +2031,7 @@ static int null_add_dev(struct nullb_device *dev)
nullb->disk->minors = 1;
nullb->disk->fops = &null_ops;
nullb->disk->private_data = nullb;
- strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ strscpy(nullb->disk->disk_name, nullb->disk_name);
if (nullb->dev->zoned) {
rv = null_register_zoned_dev(nullb);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 2fd05c1bd30b..cdb1543fa4a9 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1140,6 +1140,25 @@ static void ublk_complete_rq(struct kref *ref)
__ublk_complete_rq(req);
}
+static void ublk_do_fail_rq(struct request *req)
+{
+ struct ublk_queue *ubq = req->mq_hctx->driver_data;
+
+ if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+ blk_mq_requeue_request(req, false);
+ else
+ __ublk_complete_rq(req);
+}
+
+static void ublk_fail_rq_fn(struct kref *ref)
+{
+ struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
+ ref);
+ struct request *req = blk_mq_rq_from_pdu(data);
+
+ ublk_do_fail_rq(req);
+}
+
/*
* Since ublk_rq_task_work_cb always fails requests immediately during
* exiting, __ublk_fail_req() is only called from abort context during
@@ -1153,10 +1172,13 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
- blk_mq_requeue_request(req, false);
- else
- ublk_put_req_ref(ubq, req);
+ if (ublk_need_req_ref(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ kref_put(&data->ref, ublk_fail_rq_fn);
+ } else {
+ ublk_do_fail_rq(req);
+ }
}
static void ubq_complete_io_cmd(struct ublk_io *io, int res,
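
When the queue uses per-request references, the failure path can no longer requeue or complete immediately; it hands the decision to a kref release callback that runs only when the last reference drops. A minimal analogue of that handoff (the kref here is a simplified stand-in for the kernel's):

#include <stdio.h>

/* Minimal kref analogue: release() runs when the count hits zero. */
struct kref { int count; };

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->count == 0)
		release(k);
}

static void fail_rq_release(struct kref *k)
{
	(void)k;
	printf("last ref dropped: requeue or complete the request\n");
}

int main(void)
{
	struct kref ref = { .count = 2 };	/* I/O path still holds one */

	kref_put(&ref, fail_rq_release);	/* abort path: not yet */
	kref_put(&ref, fail_rq_release);	/* now the failure runs */
	return 0;
}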
@@ -1349,7 +1371,8 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
return BLK_EH_RESET_TIMER;
}
-static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
+static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
+ bool check_cancel)
{
blk_status_t res;
@@ -1368,7 +1391,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
return BLK_STS_IOERR;
- if (unlikely(ubq->canceling))
+ if (check_cancel && unlikely(ubq->canceling))
return BLK_STS_IOERR;
/* fill iod to slot in io cmd buffer */
@@ -1387,7 +1410,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *rq = bd->rq;
blk_status_t res;
- res = ublk_prep_req(ubq, rq);
+ res = ublk_prep_req(ubq, rq, false);
if (res != BLK_STS_OK)
return res;
@@ -1419,7 +1442,7 @@ static void ublk_queue_rqs(struct rq_list *rqlist)
ublk_queue_cmd_list(ubq, &submit_list);
ubq = this_q;
- if (ublk_prep_req(ubq, req) == BLK_STS_OK)
+ if (ublk_prep_req(ubq, req, true) == BLK_STS_OK)
rq_list_add_tail(&submit_list, req);
else
rq_list_add_tail(&requeue_list, req);
@@ -2413,9 +2436,9 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
return ub;
}
-static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_dev(struct ublk_device *ub,
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
const struct ublk_param_basic *p = &ub->params.basic;
int ublksrv_pid = (int)header->data[0];
struct queue_limits lim = {
@@ -2534,9 +2557,8 @@ out_unlock:
}
static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
cpumask_var_t cpumask;
unsigned long queue;
@@ -2585,9 +2607,8 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
info->nr_hw_queues, info->queue_depth);
}
-static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublksrv_ctrl_dev_info info;
struct ublk_device *ub;
@@ -2812,9 +2833,8 @@ static int ublk_ctrl_stop_dev(struct ublk_device *ub)
}
static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
@@ -2843,9 +2863,8 @@ static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
}
static int ublk_ctrl_get_params(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret;
@@ -2874,9 +2893,8 @@ static int ublk_ctrl_get_params(struct ublk_device *ub,
}
static int ublk_ctrl_set_params(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret = -EFAULT;
@@ -2940,9 +2958,8 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
}
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ret = -EINVAL;
int i;
@@ -2988,9 +3005,8 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
}
static int ublk_ctrl_end_recovery(struct ublk_device *ub,
- struct io_uring_cmd *cmd)
+ const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
int i;
@@ -3037,9 +3053,8 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
return ret;
}
-static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header)
{
- const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
u64 features = UBLK_F_ALL;
@@ -3178,7 +3193,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
goto out;
if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
- ret = ublk_ctrl_get_features(cmd);
+ ret = ublk_ctrl_get_features(header);
goto out;
}
@@ -3195,17 +3210,17 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
switch (_IOC_NR(cmd_op)) {
case UBLK_CMD_START_DEV:
- ret = ublk_ctrl_start_dev(ub, cmd);
+ ret = ublk_ctrl_start_dev(ub, header);
break;
case UBLK_CMD_STOP_DEV:
ret = ublk_ctrl_stop_dev(ub);
break;
case UBLK_CMD_GET_DEV_INFO:
case UBLK_CMD_GET_DEV_INFO2:
- ret = ublk_ctrl_get_dev_info(ub, cmd);
+ ret = ublk_ctrl_get_dev_info(ub, header);
break;
case UBLK_CMD_ADD_DEV:
- ret = ublk_ctrl_add_dev(cmd);
+ ret = ublk_ctrl_add_dev(header);
break;
case UBLK_CMD_DEL_DEV:
ret = ublk_ctrl_del_dev(&ub, true);
@@ -3214,19 +3229,19 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
ret = ublk_ctrl_del_dev(&ub, false);
break;
case UBLK_CMD_GET_QUEUE_AFFINITY:
- ret = ublk_ctrl_get_queue_affinity(ub, cmd);
+ ret = ublk_ctrl_get_queue_affinity(ub, header);
break;
case UBLK_CMD_GET_PARAMS:
- ret = ublk_ctrl_get_params(ub, cmd);
+ ret = ublk_ctrl_get_params(ub, header);
break;
case UBLK_CMD_SET_PARAMS:
- ret = ublk_ctrl_set_params(ub, cmd);
+ ret = ublk_ctrl_set_params(ub, header);
break;
case UBLK_CMD_START_USER_RECOVERY:
- ret = ublk_ctrl_start_recovery(ub, cmd);
+ ret = ublk_ctrl_start_recovery(ub, header);
break;
case UBLK_CMD_END_USER_RECOVERY:
- ret = ublk_ctrl_end_recovery(ub, cmd);
+ ret = ublk_ctrl_end_recovery(ub, header);
break;
default:
ret = -EOPNOTSUPP;
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index ef30445527a2..bcc26785175d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -53,6 +53,7 @@ struct intel_gtt_driver {
* of the mmio register file, that's done in the generic code. */
void (*cleanup)(void);
void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local);
/* Flags is a more or less chipset specific opaque value.
* For chipsets that need to support old ums (non-gem) code, this
* needs to be identical to the various supported agp memory types! */
@@ -336,6 +337,19 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i810_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = val & I810_PTE_LOCAL;
+
+ return val & ~0xfff;
+}
+
static resource_size_t intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
@@ -741,6 +755,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i830_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return val & ~0xfff;
+}
+
bool intel_gmch_enable_gtt(void)
{
u8 __iomem *reg;
@@ -878,6 +905,13 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
}
EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local)
+{
+ return intel_private.driver->read_entry(pg, is_present, is_local);
+}
+EXPORT_SYMBOL(intel_gmch_gtt_read_entry);
+
#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
@@ -1126,6 +1160,19 @@ static void i965_write_entry(dma_addr_t addr,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i965_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u64 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return ((val & 0xf0) << 28) | (val & ~0xfff);
+}
+
static int i9xx_setup(void)
{
phys_addr_t reg_addr;
@@ -1187,6 +1234,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = {
.cleanup = i810_cleanup,
.check_flags = i830_check_flags,
.write_entry = i810_write_entry,
+ .read_entry = i810_read_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
.gen = 2,
@@ -1194,6 +1242,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
.setup = i830_setup,
.cleanup = i830_cleanup,
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i830_chipset_flush,
@@ -1205,6 +1254,7 @@ static const struct intel_gtt_driver i915_gtt_driver = {
.cleanup = i9xx_cleanup,
/* i945 is the last gpu to need phys mem (for overlay and cursors). */
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1215,6 +1265,7 @@ static const struct intel_gtt_driver g33_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1225,6 +1276,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1235,6 +1287,7 @@ static const struct intel_gtt_driver i965_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1244,6 +1297,7 @@ static const struct intel_gtt_driver g4x_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1254,6 +1308,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
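
Each GTT generation decodes its PTEs differently, so the new read_entry hook slots into the per-chipset ops table next to write_entry. A sketch of that dispatch shape (bit layout and names are illustrative, patterned on the i8xx variant):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PTE_VALID 0x1	/* illustrative bit, mirrors I810_PTE_VALID */

struct gtt_ops {
	uint64_t (*read_entry)(uint32_t pte, bool *is_present);
};

static uint64_t gen2_read_entry(uint32_t pte, bool *is_present)
{
	*is_present = pte & PTE_VALID;
	return pte & ~0xfffu;		/* address bits only */
}

static const struct gtt_ops gen2_ops = { .read_entry = gen2_read_entry };

int main(void)
{
	bool present;
	uint64_t addr = gen2_ops.read_entry(0x12345001, &present);

	printf("addr=%#llx present=%d\n", (unsigned long long)addr, present);
	return 0;
}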
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 2fa2c9135eac..7eee3eb47a8e 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -392,7 +392,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf)
return -ENOMEM;
- pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+ pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
pgoff_t subpgcnt;
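
The cast matters because size_limit_mb * 1024 * 1024 is evaluated in 32-bit arithmetic; for limits of 4 GiB and up the product wraps before the page shift. A short demonstration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int size_limit_mb = 4096;	/* large enough to wrap 32 bits */

	/* 32-bit arithmetic wraps before the shift... */
	unsigned int bad = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	/* ...widening one operand keeps the product in 64 bits. */
	uint64_t good = ((uint64_t)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;

	printf("bad=%u good=%llu\n", bad, (unsigned long long)good);
	return 0;
}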
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index 5767aed25cdc..a123c05cbc9e 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -95,7 +95,7 @@ void __init kvm_arm_target_impl_cpu_init(void)
for (i = 0; i < max_cpus; i++) {
arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
- i, &res);
+ i, 0, 0, &res);
if (res.a0 != SMCCC_RET_SUCCESS) {
pr_warn("Discovering target implementation CPUs failed\n");
goto mem_free;
@@ -103,7 +103,7 @@ void __init kvm_arm_target_impl_cpu_init(void)
target[i].midr = res.a1;
target[i].revidr = res.a2;
target[i].aidr = res.a3;
- };
+ }
if (!cpu_errata_set_target_impl(max_cpus, target)) {
pr_warn("Failed to set target implementation CPUs\n");
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index b5f0a7a2e1bf..4b70cbaa1caa 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -186,3 +186,37 @@ their hardware offsets within the chip.
Encourage users to switch to using them and eventually remove the existing
global export/unexport attributes.
+
+-------------------------------------------------------------------------------
+
+Remove GPIOD_FLAGS_BIT_NONEXCLUSIVE
+
+GPIOs in the linux kernel are meant to be an exclusive resource. This means
+that the GPIO descriptors (the software representation of the hardware concept)
+are not reference counted and - in general - only one user at a time can
+request a GPIO line and control its settings. The consumer API is designed
+around full control of the line's state as evidenced by the fact that, for
+instance, gpiod_set_value() does indeed drive the line as requested, instead
+of bumping an enable counter of some sort.
+
+A problematic use-case for GPIOs is when two consumers want to use the same
+descriptor independently. An example of such a user is the regulator subsystem
+which may instantiate several struct regulator_dev instances containing
+a struct device but using the same enable GPIO line.
+
+A workaround was introduced in the form of the GPIOD_FLAGS_BIT_NONEXCLUSIVE
+flag but its implementation is problematic: it does not provide any
+synchronization of usage nor did it introduce any enable count meaning the
+non-exclusive users of the same descriptor will in fact "fight" for the
+control over it. This flag should be removed and replaced with a better
+solution, possibly based on the new power sequencing subsystem.
+
+-------------------------------------------------------------------------------
+
+Remove devm_gpiod_unhinge()
+
+devm_gpiod_unhinge() is provided as a way to transfer the ownership of managed
+enable GPIOs to the regulator core. Rather than doing that however, we should
+make it possible for the regulator subsystem to deal with GPIO resources
+whose lifetime it doesn't control, as logically a GPIO obtained by a caller
+should also be freed by it.
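
For illustration only, one possible direction for the replacement mentioned above is a small enable-count wrapper so several consumers can share one enable line without fighting over its state; no such API exists today and the names below are hypothetical:

#include <stdio.h>

/* Hypothetical shared-enable wrapper: bump a count instead of driving
 * the line directly, so N regulators can share one enable GPIO. */
struct shared_gpio {
	int enable_count;
	int value;
};

static void shared_gpio_enable(struct shared_gpio *g)
{
	if (g->enable_count++ == 0) {
		g->value = 1;		/* first user drives the line */
		printf("line driven high\n");
	}
}

static void shared_gpio_disable(struct shared_gpio *g)
{
	if (--g->enable_count == 0) {
		g->value = 0;		/* last user releases it */
		printf("line driven low\n");
	}
}

int main(void)
{
	struct shared_gpio g = { 0, 0 };

	shared_gpio_enable(&g);		/* regulator A */
	shared_gpio_enable(&g);		/* regulator B: no state change */
	shared_gpio_disable(&g);	/* A off: line stays high */
	shared_gpio_disable(&g);	/* B off: line goes low */
	return 0;
}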
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 0cd4c36ae8aa..541517536489 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -410,7 +410,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
goto err;
}
- device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
return 0;
err:
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 6895b65c86af..d27bfac6c9f5 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -823,6 +823,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *irq;
struct tegra_gpio *gpio;
struct device_node *np;
+ struct resource *res;
char **names;
int err;
@@ -842,19 +843,19 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->num_banks++;
/* get register apertures */
- gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security");
- if (IS_ERR(gpio->secure)) {
- gpio->secure = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(gpio->secure))
- return PTR_ERR(gpio->secure);
- }
-
- gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
- if (IS_ERR(gpio->base)) {
- gpio->base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(gpio->base))
- return PTR_ERR(gpio->base);
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "security");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpio->secure = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->secure))
+ return PTR_ERR(gpio->secure);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
err = platform_irq_count(pdev);
if (err < 0)
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index be81fa2b17ab..3dae63f3ea21 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -1011,6 +1011,7 @@ static void zynq_gpio_remove(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n");
+ device_init_wakeup(&pdev->dev, 0);
gpiochip_remove(&gpio->chip);
device_set_wakeup_capable(&pdev->dev, 0);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 08205f355ceb..120d1ec5af3b 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -317,11 +317,15 @@ EXPORT_SYMBOL_GPL(devm_gpiod_put);
* @dev: GPIO consumer
* @desc: GPIO descriptor to remove resource management from
*
+ * *DEPRECATED*
+ * This function should not be used. It's been provided as a workaround for
+ * resource ownership issues in the regulator framework and should be replaced
+ * with a better solution.
+ *
* Remove resource management from a GPIO descriptor. This is needed when
* you want to hand over lifecycle management of a descriptor to another
* mechanism.
*/
-
void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc)
{
int ret;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index eb667f8f1ead..65f6a7177b78 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -193,6 +193,8 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "himax,hx8357", "gpios-reset", false },
{ "himax,hx8369", "gpios-reset", false },
+#endif
+#if IS_ENABLED(CONFIG_MTD_NAND_JZ4780)
/*
* The rb-gpios semantics was undocumented and qi,lb60 (along with
* the ingenic driver) got it wrong. The active state encodes the
@@ -266,6 +268,9 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" },
{ "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" },
#endif
+#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
+ { "atmel,hsmci", "cd-gpios", "cd-inverted" },
+#endif
#if IS_ENABLED(CONFIG_PCI_IMX6)
{ "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" },
{ "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" },
@@ -292,9 +297,6 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "regulator-gpio", "enable-gpio", "enable-active-high" },
{ "regulator-gpio", "enable-gpios", "enable-active-high" },
#endif
-#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
- { "atmel,hsmci", "cd-gpios", "cd-inverted" },
-#endif
};
unsigned int i;
bool active_high;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6d83ccfa42ee..2c04ae133848 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -353,7 +353,6 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a30111d2c3ea..b34b915203f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3643,6 +3643,13 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev, adev->ip_blocks[i].version->type))
continue;
+ /* Since we skip suspend for S0i3, we need to cancel the delayed
+ * idle work here as the suspend callback never gets called.
+ */
+ if (adev->in_s0ix &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
/* skip suspend of gfx/mes and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
* like at runtime. PSP is also part of the always on hardware
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index dc2713ec95a5..9e738fae2b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -120,6 +120,8 @@ MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 9f627caedc3f..667080cc9ae1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -75,11 +75,25 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
*/
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct dma_buf *dmabuf = attach->dmabuf;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
+ u32 domains = bo->preferred_domains;
- /* pin buffer into GTT */
- return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ dma_resv_assert_held(dmabuf->resv);
+
+ /*
+ * Try pinning into VRAM to allow P2P with RDMA NICs without ODP
+ * support if all attachments can do P2P. If any attachment can't do
+ * P2P just pin into GTT instead.
+ */
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (!attach->peer2peer)
+ domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
+
+ if (domains & AMDGPU_GEM_DOMAIN_VRAM)
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ return amdgpu_bo_pin(bo, domains);
}
/**
@@ -134,9 +148,6 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return ERR_PTR(r);
-
- } else if (bo->tbo.resource->mem_type != TTM_PL_TT) {
- return ERR_PTR(-EBUSY);
}
switch (bo->tbo.resource->mem_type) {
@@ -184,7 +195,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- if (sgt->sgl->page_link) {
+ if (sg_page(sgt->sgl)) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 464625282872..ecb74ccf1d90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -699,12 +699,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst)
{
- u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
- adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- int r;
+ int r, cnt = 0;
uint32_t seq;
/*
@@ -761,10 +759,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
dev_err(adev->dev, "timeout waiting for kiq fence\n");
r = -ETIME;
- }
+ } else
+ r = 0;
}
error_unlock_reset:
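
Instead of one long polling timeout, the flush now polls briefly and then retries with short sleeps, which also gives a pending GPU reset a chance to interrupt the wait. A user-space sketch of that poll-then-retry loop (the fence poll is stubbed; the reset-pending check is omitted):

#include <stdio.h>
#include <unistd.h>

#define MAX_TRY 1000
#define BAILOUT_MS 5

/* Stubbed fence poll: returns 1 once signaled, 0 otherwise. */
static int fence_poll(int *state) { return ++(*state) > 3; }

int main(void)
{
	int state = 0, cnt = 0;
	int r = fence_poll(&state);

	/* Poll first, then sleep-and-retry instead of one long busy wait. */
	while (r < 1 && cnt++ < MAX_TRY) {
		usleep(BAILOUT_MS * 1000);	/* msleep() stand-in */
		r = fence_poll(&state);
	}

	if (cnt > MAX_TRY) {
		printf("timeout waiting for fence\n");
		return 1;
	}
	printf("fence signaled after %d retries\n", cnt);
	return 0;
}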
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 80cd6f5273db..0b9987781f76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -163,8 +163,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* When GTT is just an alternative to VRAM make sure that we
* only use it as fallback and still try to fill up VRAM first.
*/
- if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
- !(adev->flags & AMD_IS_APU))
+ if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
+ domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 6da8994e0469..2d7f82e98df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -24,6 +24,7 @@
#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
@@ -907,6 +908,9 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
struct ttm_resource_manager *man = &mgr->manager;
int err;
+ man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size);
+ if (IS_ERR(man->cg))
+ return PTR_ERR(man->cg);
ttm_resource_manager_init(man, &adev->mman.bdev,
adev->gmc.real_vram_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index e65916ada23b..ef9538fbbf53 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -894,6 +894,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
{
int pipe;
+ /* return early if we have already fetched these */
+ if (adev->mes.sched_version && adev->mes.kiq_version)
+ return;
+
/* get MES scheduler/KIQ versions */
mutex_lock(&adev->srbm_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 183dd3346da5..e6ab617b9a40 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -1392,17 +1392,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
mes_v12_0_queue_init_register(ring);
}
- /* get MES scheduler/KIQ versions */
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, 3, pipe, 0, 0);
+ if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) ||
+ ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) {
+ /* get MES scheduler/KIQ versions */
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, 3, pipe, 0, 0);
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
- adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ if (pipe == AMDGPU_MES_SCHED_PIPE)
+ adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+ adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e477d7509646..9bbee484d57c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1983,9 +1983,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
- if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
- dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
-
if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4))
@@ -2001,7 +1998,11 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
- dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
+ if (!amdgpu_sriov_vf(dev->gpu->adev))
+ dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
+
+ if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
+ dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d0d8ad5368c3..9fed4471405f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1726,9 +1726,30 @@ static const struct dmi_system_id dmi_quirk_table[] = {
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
},
},
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
+ },
+ },
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 36a830a7440f..e8bdd7f0c460 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
*
* Panel Replay and PSR SU
* - Enable when:
+ * - VRR is disabled
* - vblank counter is disabled
* - entry is allowed (usermode demonstrates an adequate number of fast
* commits)
@@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
bool is_sr_active = (link->replay_settings.replay_allow_active ||
link->psr_settings.psr_allow_active);
bool is_crc_window_active = false;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
is_crc_window_active =
amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
#endif
- if (link->replay_settings.replay_feature_enabled &&
+ if (link->replay_settings.replay_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
amdgpu_dm_psr_disable(vblank_work->stream, false);
- } else if (link->psr_settings.psr_feature_enabled &&
+ } else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
struct amdgpu_dm_connector *aconn =
@@ -244,6 +246,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -271,8 +275,15 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0)
+ if (dm->active_vblank_irq_count == 0) {
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
dc_allow_idle_optimizations(dm->dc, true);
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
+ }
mutex_unlock(&dm->dc_lock);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index be54f0e696ce..94e99e540691 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -86,6 +86,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
/* Store configuration options */
(*dml_ctx)->config = *config;
+ DC_FP_START();
+
/*Initialize SOCBB and DCNIP params */
dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
@@ -96,6 +98,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
/*Initialize DML21 instance */
dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
+
+ DC_FP_END();
}
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
@@ -283,11 +287,16 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
{
bool out = false;
+ DC_FP_START();
+
/* Use dml_validate_only for fast_validate path */
- if (fast_validate) {
+ if (fast_validate)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
- } else
+ else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
+
+ DC_FP_END();
+
return out;
}
@@ -426,8 +435,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming;
+ DC_FP_START();
+
/* need to initialize copied instance for internal references to be correct */
dml2_initialize_instance(&dst_dml_ctx->v21.dml_init);
+
+ DC_FP_END();
}
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 939ee0708bd2..f549a778f6f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -732,11 +732,16 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
return out;
}
+ DC_FP_START();
+
/* Use dml_validate_only for fast_validate path */
if (fast_validate)
out = dml2_validate_only(context);
else
out = dml2_validate_and_build_resource(in_dc, context);
+
+ DC_FP_END();
+
return out;
}
@@ -779,11 +784,15 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
break;
}
+ DC_FP_START();
+
initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+
+ DC_FP_END();
}
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 2a9606118d89..21dc956b5f35 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -429,6 +429,7 @@ struct amd_pm_funcs {
int (*set_pp_table)(void *handle, const char *buf, size_t size);
void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
+ int (*pause_power_profile)(void *handle, bool pause);
/* export to amdgpu */
struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 81e9b443ca0a..3533d43ed1e7 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -349,6 +349,25 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
+ bool pause)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (pp_funcs && pp_funcs->pause_power_profile) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->pause_power_profile(
+ adev->powerplay.pp_handle, pause);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
uint32_t pstate)
{
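
The new wrapper follows the existing amdgpu_dpm pattern: check for an optional backend callback, take the PM mutex around the call, and treat a missing implementation as success. A compile-and-run sketch of that dispatch (names are illustrative; a pthread mutex stands in for adev->pm.mutex):

#include <stdio.h>
#include <pthread.h>

/* Optional-callback dispatch guarded by a mutex, mirroring the
 * pp_funcs->pause_power_profile plumbing. */
struct pm_funcs { int (*pause_power_profile)(void *handle, int pause); };

static int smu_pause(void *handle, int pause)
{
	(void)handle;
	printf("workload %s\n", pause ? "paused" : "resumed");
	return 0;
}

static struct pm_funcs funcs = { .pause_power_profile = smu_pause };
static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;

static int dpm_pause_power_profile(int pause)
{
	int ret = 0;

	if (funcs.pause_power_profile) {	/* backend may not implement it */
		pthread_mutex_lock(&pm_mutex);
		ret = funcs.pause_power_profile(NULL, pause);
		pthread_mutex_unlock(&pm_mutex);
	}
	return ret;
}

int main(void)
{
	dpm_pause_power_profile(1);	/* before idle optimizations */
	dpm_pause_power_profile(0);	/* after */
	return 0;
}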
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index f93d287dbf13..4c0f7ad14816 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -410,6 +410,8 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en);
+int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
+ bool pause);
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 033c3229b555..46cce1d2aaf3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2398,7 +2398,11 @@ static int smu_switch_power_profile(void *handle,
smu_power_profile_mode_get(smu, type);
else
smu_power_profile_mode_put(smu, type);
- ret = smu_bump_power_profile_mode(smu, NULL, 0);
+ /* don't switch the active workload when paused */
+ if (smu->pause_workload)
+ ret = 0;
+ else
+ ret = smu_bump_power_profile_mode(smu, NULL, 0);
if (ret) {
if (enable)
smu_power_profile_mode_put(smu, type);
@@ -2411,6 +2415,35 @@ static int smu_switch_power_profile(void *handle,
return 0;
}
+static int smu_pause_power_profile(void *handle,
+ bool pause)
+{
+ struct smu_context *smu = handle;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ int ret;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ smu->pause_workload = pause;
+
+ /* force to bootup default profile */
+ if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
+ ret = smu->ppt_funcs->set_power_profile_mode(smu,
+ workload_mask,
+ NULL,
+ 0);
+ else
+ ret = smu_bump_power_profile_mode(smu, NULL, 0);
+ return ret;
+ }
+
+ return 0;
+}
+
static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
struct smu_context *smu = handle;
@@ -3733,6 +3766,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_pp_table = smu_sys_get_pp_table,
.set_pp_table = smu_sys_set_pp_table,
.switch_power_profile = smu_switch_power_profile,
+ .pause_power_profile = smu_pause_power_profile,
/* export to amdgpu */
.dispatch_tasks = smu_handle_dpm_task,
.load_firmware = smu_load_microcode,
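[Editor's note] While paused, smu_pause_power_profile() hands the PPT backend a single-bit mask, 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, instead of the refcounted smu->workload_mask, so the accumulated profile requests survive and can be replayed by smu_bump_power_profile_mode() on resume. A small sketch of consuming such a mask (the debug loop is illustrative):

static void example_log_workload_mask(u32 workload_mask)
{
	unsigned long mask = workload_mask;
	int bit;

	/* each set bit selects one PP_SMC_POWER_PROFILE_* entry */
	for_each_set_bit(bit, &mask, 32)
		pr_debug("workload profile %d requested\n", bit);
}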
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3ba169639f54..dd6d0e7aa242 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -558,6 +558,7 @@ struct smu_context {
/* asic agnostic workload mask */
uint32_t workload_mask;
+ bool pause_workload;
/* default/user workload preference */
uint32_t power_profile_mode;
uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 78391d8f35a9..25fabf336a64 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1204,7 +1204,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
- if (speed == 0)
+ if (!speed || speed > UINT_MAX/8)
return -EINVAL;
/*
* To prevent from possible overheat, some ASICs may have requirement
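[Editor's note] The widened bound complements the existing zero check: further down, this function divides by a multiple of speed, and assuming the usual shape of that computation (not visible in the hunk), a u32 speed above UINT_MAX/8 makes the 8 * speed term wrap — for speed == 0x20000000 it wraps exactly to 0, turning the division into a divide-by-zero. A hedged sketch:

/* Illustration only; the real formula lives further down in
 * smu_v11_0_set_fan_speed_rpm() and is assumed here, not quoted.
 */
static u32 example_tach_period(u32 crystal_clock_freq, u32 speed)
{
	if (!speed || speed > UINT_MAX / 8)
		return 0;	/* mirrors the -EINVAL rejection above */

	/* safe: 8 * speed can no longer wrap a 32-bit value */
	return 60 * crystal_clock_freq * 10000 / (8 * speed);
}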
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ed05b131ed3a..c8fc271b33b7 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -247,6 +247,7 @@ i915-y += \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
display/intel_display_reset.o \
+ display/intel_display_rpm.o \
display/intel_display_rps.o \
display/intel_display_snapshot.o \
display/intel_display_wa.o \
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 206818f9ad49..f10c0fb8d2c8 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 10ab3cc73e58..49f02aca818b 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index d9c3152d4338..0713b2709412 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -29,6 +29,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 92d32d6b5bce..80b71bd6a837 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -26,6 +26,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index b42c717085f3..017b617a8069 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 280699438526..ed560e3438db 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 55b9e9bfcc4d..b39aae9165df 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -519,7 +519,7 @@ static void intel_disable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
/*
* Make sure the panel is off before trying to change the mode.
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 674a0e5f0858..4307e2ed03d9 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -10,6 +10,7 @@
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_pcode.h"
@@ -344,10 +345,9 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "Enabled by kernel parameter: %s\n",
str_yes_no(display->params.enable_ips));
@@ -361,7 +361,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
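[Editor's note] The hsw_ips conversion swaps the i915-wide runtime-PM helpers for the new display-scoped wrappers, with the wakeref now typed as struct ref_tracker *. The same shape applies to any display debugfs handler; a sketch, with an illustrative private pointer and body:

static int example_debugfs_show(struct seq_file *m, void *unused)
{
	struct intel_display *display = m->private;	/* illustrative */
	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);	/* hold device awake */

	seq_puts(m, "state: ok\n");			/* illustrative body */

	intel_display_rpm_put(display, wakeref);	/* may idle the device */

	return 0;
}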
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 013295f66d56..5e8344fdfc28 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -630,84 +630,92 @@ vlv_primary_async_flip(struct intel_dsb *dsb,
static void
bdw_primary_enable_flip_done(struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
spin_unlock_irq(&i915->irq_lock);
}
static void
bdw_primary_disable_flip_done(struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
spin_unlock_irq(&i915->irq_lock);
}
static void
ivb_primary_enable_flip_done(struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
spin_unlock_irq(&i915->irq_lock);
}
static void
ivb_primary_disable_flip_done(struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
spin_unlock_irq(&i915->irq_lock);
}
static void
ilk_primary_enable_flip_done(struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
spin_unlock_irq(&i915->irq_lock);
}
static void
ilk_primary_disable_flip_done(struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
spin_unlock_irq(&i915->irq_lock);
}
static void
vlv_primary_enable_flip_done(struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
spin_lock_irq(&i915->irq_lock);
- i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ i915_enable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
spin_unlock_irq(&i915->irq_lock);
}
static void
vlv_primary_disable_flip_done(struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
spin_lock_irq(&i915->irq_lock);
- i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ i915_disable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
spin_unlock_irq(&i915->irq_lock);
}
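[Editor's note] Each helper above now derives struct intel_display via to_intel_display(), a _Generic()-based dispatcher in i915 that accepts many pointer types. A self-contained sketch of the idea (simplified types, not the i915 definition):

struct example_display;
struct example_plane { struct example_display *display; };
struct example_crtc { struct example_display *display; };

static inline struct example_display *
example_plane_to_display(struct example_plane *plane)
{
	return plane->display;
}

static inline struct example_display *
example_crtc_to_display(struct example_crtc *crtc)
{
	return crtc->display;
}

/* dispatch on the pointer type at compile time */
#define example_to_display(p)						\
	_Generic((p),							\
		 struct example_plane *: example_plane_to_display,	\
		 struct example_crtc *: example_crtc_to_display)(p)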
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 7c80e37c1c5f..40751f1547b7 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -9,6 +9,7 @@
#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
#include "intel_bo.h"
+#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_trace.h"
#include "intel_fb.h"
@@ -81,13 +82,14 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *i915)
+static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int i;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
const struct cxsr_latency *latency = &cxsr_latency_table[i];
- bool is_desktop = !IS_MOBILE(i915);
+ bool is_desktop = !display->platform.mobile;
if (is_desktop == latency->is_desktop &&
i915->is_ddr3 == latency->is_ddr3 &&
@@ -96,15 +98,16 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *
return latency;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n",
i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq);
return NULL;
}
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
vlv_punit_get(dev_priv);
@@ -120,14 +123,15 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
vlv_punit_put(dev_priv);
}
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_pm5(struct intel_display *display, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
vlv_punit_get(dev_priv);
@@ -145,53 +149,52 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
#define FW_WM(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
- struct intel_display *display = &dev_priv->display;
bool was_enabled;
u32 val;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
- } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ if (display->platform.valleyview || display->platform.cherryview) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_de_write(display, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF_VLV);
+ } else if (display->platform.g4x || display->platform.i965gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.pineview) {
+ val = intel_de_read(display, DSPFW3(display));
was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
if (enable)
val |= PINEVIEW_SELF_REFRESH_EN;
else
val &= ~PINEVIEW_SELF_REFRESH_EN;
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), val);
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW3(dev_priv));
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, DSPFW3(display), val);
+ intel_de_posting_read(display, DSPFW3(display));
+ } else if (display->platform.i945g || display->platform.i945gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_I915GM(dev_priv)) {
+ intel_de_write(display, FW_BLC_SELF, val);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.i915gm) {
/*
* FIXME can't find a bit like this for 915G, and
* yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
- was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
+ was_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, INSTPM, val);
- intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
+ intel_de_write(display, INSTPM, val);
+ intel_de_posting_read(display, INSTPM);
} else {
return false;
}
trace_intel_memory_cxsr(display, was_enabled, enable);
- drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ drm_dbg_kms(display->drm, "memory self-refresh is %s (was %s)\n",
str_enabled_disabled(enable),
str_enabled_disabled(was_enabled));
@@ -200,7 +203,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
/**
* intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
+ * @display: display device
* @enable: Allow vs. disallow CxSR
*
* Allow or disallow the system to enter a special CxSR
@@ -235,17 +238,17 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
* the hardware w.r.t. HPLL SR when writing to plane registers.
* Disallowing just CxSR is sufficient.
*/
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
bool ret;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- ret = _intel_set_memory_cxsr(dev_priv, enable);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.wm.vlv.cxsr = enable;
- else if (IS_G4X(dev_priv))
- dev_priv->display.wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(display, enable);
+ if (display->platform.valleyview || display->platform.cherryview)
+ display->wm.vlv.cxsr = enable;
+ else if (display->platform.g4x)
+ display->wm.g4x.cxsr = enable;
+ mutex_unlock(&display->wm.wm_mutex);
return ret;
}
@@ -271,8 +274,8 @@ static const int pessimal_latency_ns = 5000;
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
int sprite0_start, sprite1_start;
@@ -280,22 +283,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
switch (pipe) {
case PIPE_A:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
+ dsparb2 = intel_de_read(display, DSPARB2);
+ dsparb3 = intel_de_read(display, DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
@@ -310,26 +311,26 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
fifo_state->plane[PLANE_CURSOR] = 63;
}
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i9xx_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i830_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x1ff;
@@ -337,22 +338,22 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i845_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
@@ -537,7 +538,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
/**
* intel_calculate_wm - calculate watermark level
- * @i915: the device
+ * @display: display device
* @pixel_rate: pixel clock
* @wm: chip FIFO params
* @fifo_size: size of the FIFO buffer
@@ -555,7 +556,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
-static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
+static unsigned int intel_calculate_wm(struct intel_display *display,
int pixel_rate,
const struct intel_watermark_params *wm,
int fifo_size, int cpp,
@@ -573,10 +574,10 @@ static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
wm->guard_size;
- drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
+ drm_dbg_kms(display->drm, "FIFO entries required for mode: %d\n", entries);
wm_size = fifo_size - entries;
- drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
+ drm_dbg_kms(display->drm, "FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > wm->max_wm)
@@ -626,11 +627,11 @@ static bool intel_crtc_active(struct intel_crtc *crtc)
crtc->config->hw.adjusted_mode.crtc_clock;
}
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+static struct intel_crtc *single_enabled_crtc(struct intel_display *display)
{
struct intel_crtc *crtc, *enabled = NULL;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -641,21 +642,21 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pnv_update_wm(struct drm_i915_private *dev_priv)
+static void pnv_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned int wm;
- latency = pnv_get_cxsr_latency(dev_priv);
+ latency = pnv_get_cxsr_latency(display);
if (!latency) {
- drm_dbg_kms(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
- intel_set_memory_cxsr(dev_priv, false);
+ drm_dbg_kms(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ intel_set_memory_cxsr(display, false);
return;
}
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
@@ -663,47 +664,46 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
int cpp = fb->format->cpp[0];
/* Display SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ reg = intel_de_read(display, DSPFW1(display));
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
+ intel_de_write(display, DSPFW1(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_CURSOR_SR_MASK,
- FW_WM(wm, CURSOR_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_CURSOR_SR_MASK, FW_WM(wm, CURSOR_SR));
/* Display HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
/* cursor HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ reg = intel_de_read(display, DSPFW3(display));
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
+ intel_de_write(display, DSPFW3(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW3 register is %x\n", reg);
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
} else {
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
}
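[Editor's note] intel_calculate_wm(), partially visible above, sizes a watermark as the FIFO space left after subtracting the entries consumed while a memory request is outstanding. A worked sketch of that arithmetic with illustrative numbers (latency in 0.1 us units, as in the callers here):

static int example_wm_level(void)
{
	int pixel_rate = 148500;	/* kHz, illustrative */
	int cpp = 4;			/* bytes per pixel */
	int latency = 200;		/* 20 us in 0.1 us units */
	int cacheline_size = 64, guard_size = 2, fifo_size = 512;
	int entries, wm_size;

	/* bytes fetched during the latency window: 11880 */
	entries = pixel_rate * cpp * latency / 10000;
	/* round up to FIFO cachelines and pad: 186 + 2 */
	entries = DIV_ROUND_UP(entries, cacheline_size) + guard_size;

	wm_size = fifo_size - entries;	/* 324 entries of headroom */
	return wm_size;
}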
@@ -794,53 +794,51 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
return max(0, tlb_miss);
}
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_write_wm_values(struct intel_display *display,
const struct g4x_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
- FW_WM(wm->sr.fbc, FBC_SR) |
- FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
- FW_WM(wm->sr.cursor, CURSOR_SR) |
- FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
- FW_WM(wm->hpll.plane, HPLL_SR));
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
+
+ intel_de_posting_read(display, DSPFW1(display));
}
#define FW_WM_VLV(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_write_wm_values(struct intel_display *display,
const struct vlv_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
- (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ intel_de_write(display, VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
}
/*
@@ -848,72 +846,72 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
* high order bits so that there are no out of bounds values
* present in the registers during the reprogramming.
*/
- intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
- intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(wm->sr.cursor, CURSOR_SR));
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPHOWM, 0);
+ intel_de_write(display, DSPHOWM1, 0);
+ intel_de_write(display, DSPFW4, 0);
+ intel_de_write(display, DSPFW5, 0);
+ intel_de_write(display, DSPFW6, 0);
+
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (display->platform.cherryview) {
+ intel_de_write(display, DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
+ intel_de_write(display, DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
- intel_uncore_write(&dev_priv->uncore, DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_posting_read(display, DSPFW1(display));
}
#undef FW_WM_VLV
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void g4x_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+ display->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ display->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ display->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
- dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
+ display->wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -962,11 +960,11 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
+ unsigned int latency = display->wm.pri_latency[level] * 10;
unsigned int pixel_rate, htotal, cpp, width, wm;
if (latency == 0)
@@ -1017,10 +1015,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1033,13 +1031,13 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
int level, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
/* NORMAL level doesn't have an FBC watermark */
level = max(level, G4X_WM_LEVEL_SR);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->fbc != value;
@@ -1056,8 +1054,8 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
bool dirty = false;
int level;
@@ -1069,7 +1067,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
int wm, max_wm;
@@ -1109,7 +1107,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
plane->base.name,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
@@ -1117,7 +1115,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
if (plane_id == PLANE_PRIMARY)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FBC watermarks: SR=%d, HPLL=%d\n",
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
@@ -1137,9 +1135,9 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (level >= dev_priv->display.wm.num_levels)
+ if (level >= display->wm.num_levels)
return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1281,7 +1279,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -1311,7 +1309,7 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
max(optimal->wm.plane[plane_id],
active->wm.plane[plane_id]);
- drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ drm_WARN_ON(display->drm, intermediate->wm.plane[plane_id] >
g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
}
@@ -1329,23 +1327,23 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->hpll.fbc = max(optimal->hpll.fbc,
active->hpll.fbc);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
intermediate->hpll_en);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
intermediate->fbc_en && intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
intermediate->fbc_en && intermediate->hpll_en);
@@ -1376,7 +1374,7 @@ static int g4x_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+static void g4x_merge_wm(struct intel_display *display,
struct g4x_wm_values *wm)
{
struct intel_crtc *crtc;
@@ -1386,7 +1384,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->hpll_en = true;
wm->fbc_en = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
if (!crtc->active)
@@ -1408,7 +1406,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->fbc_en = false;
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
enum pipe pipe = crtc->pipe;
@@ -1420,23 +1418,23 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+static void g4x_program_watermarks(struct intel_display *display)
{
- struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *old_wm = &display->wm.g4x;
struct g4x_wm_values new_wm = {};
- g4x_merge_wm(dev_priv, &new_wm);
+ g4x_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- g4x_write_wm_values(dev_priv, &new_wm);
+ g4x_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
*old_wm = new_wm;
}
@@ -1444,30 +1442,30 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
static void g4x_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
/* latency must be in 0.1us units. */
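[Editor's note] g4x_program_watermarks() above encodes a common ordering rule for features gated on the values being programmed: disable before writing values that no longer support the feature, enable only after values that do are latched (vlv_program_watermarks() below extends this to PM5 and DDR DVFS). A condensed, self-contained sketch:

static void example_feature_set(bool enable) { /* stub */ }
static void example_write_values(void) { /* stub */ }

static void example_reprogram(bool old_en, bool new_en)
{
	if (old_en && !new_en)		/* is_disabling() */
		example_feature_set(false);

	example_write_values();		/* hardware sees only safe states */

	if (!old_en && new_en)		/* is_enabling() */
		example_feature_set(true);
}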
@@ -1486,18 +1484,18 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
return ret;
}
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void vlv_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+ display->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+ if (display->platform.cherryview) {
+ display->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ display->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
}
}
@@ -1505,13 +1503,13 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
unsigned int pixel_rate, htotal, cpp, width, wm;
- if (dev_priv->display.wm.pri_latency[level] == 0)
+ if (display->wm.pri_latency[level] == 0)
return USHRT_MAX;
if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1532,7 +1530,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
wm = 63;
} else {
wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->display.wm.pri_latency[level] * 10);
+ display->wm.pri_latency[level] * 10);
}
return min_t(unsigned int, wm, USHRT_MAX);
@@ -1546,8 +1544,8 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1616,11 +1614,11 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
fifo_left -= plane_extra;
}
- drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
+ drm_WARN_ON(display->drm, active_planes != 0 && fifo_left != 0);
/* give it all to the first plane if none are active */
if (active_planes == 0) {
- drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
+ drm_WARN_ON(display->drm, fifo_left != fifo_size);
fifo_state->plane[PLANE_PRIMARY] = fifo_left;
}
@@ -1631,9 +1629,9 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
static void vlv_invalidate_wms(struct intel_crtc *crtc,
struct vlv_wm_state *wm_state, int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1659,10 +1657,10 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1675,8 +1673,8 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
int level;
bool dirty = false;
@@ -1686,7 +1684,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
@@ -1703,7 +1701,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
plane->base.name,
crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
@@ -1734,8 +1732,8 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
@@ -1745,7 +1743,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int level;
/* initially allow all levels */
- wm_state->num_levels = dev_priv->display.wm.num_levels;
+ wm_state->num_levels = display->wm.num_levels;
/*
* Note that enabling cxsr with no primary/sprite planes
* enabled can wedge the pipe. Hence we only allow cxsr
@@ -1755,7 +1753,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
+ const int sr_fifo_size = INTEL_NUM_PIPES(display) * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
@@ -1855,6 +1853,7 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_uncore *uncore = &dev_priv->uncore;
const struct intel_crtc_state *crtc_state =
@@ -1871,8 +1870,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
- drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
- drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
+ drm_WARN_ON(display->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(display->drm, fifo_size != 511);
trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
@@ -1889,8 +1888,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
switch (crtc->pipe) {
case PIPE_A:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
VLV_FIFO(SPRITEB, 0xff));
@@ -1902,12 +1901,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_B:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
VLV_FIFO(SPRITED, 0xff));
@@ -1919,12 +1918,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_C:
- dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb3 = intel_de_read_fw(display, DSPARB3);
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
VLV_FIFO(SPRITEF, 0xff));
@@ -1936,14 +1935,14 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB3, dsparb3);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
default:
break;
}
- intel_uncore_posting_read_fw(uncore, DSPARB(dev_priv));
+ intel_de_read_fw(display, DSPARB(display));
spin_unlock(&uncore->lock);
}
@@ -2018,16 +2017,16 @@ static int vlv_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
+static void vlv_merge_wm(struct intel_display *display,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
int num_active_pipes = 0;
- wm->level = dev_priv->display.wm.num_levels - 1;
+ wm->level = display->wm.num_levels - 1;
wm->cxsr = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
if (!crtc->active)
@@ -2046,7 +2045,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
if (num_active_pipes > 1)
wm->level = VLV_WM_LEVEL_PM2;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
enum pipe pipe = crtc->pipe;
@@ -2061,35 +2060,35 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
+static void vlv_program_watermarks(struct intel_display *display)
{
- struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values *old_wm = &display->wm.vlv;
struct vlv_wm_values new_wm = {};
- vlv_merge_wm(dev_priv, &new_wm);
+ vlv_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, false);
+ chv_set_memory_dvfs(display, false);
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, false);
+ chv_set_memory_pm5(display, false);
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- vlv_write_wm_values(dev_priv, &new_wm);
+ vlv_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, true);
+ chv_set_memory_pm5(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, true);
+ chv_set_memory_dvfs(display, true);
*old_wm = new_wm;
}
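
The ordering in vlv_program_watermarks() hinges on the is_disabling()/is_enabling() helpers: deeper power states (DDR DVFS, PM5, cxsr) are exited before the new watermark values are written and re-entered only afterwards, so the hardware never runs a deep state against stale watermarks. The helpers are defined earlier in this file; in essence:

	static bool is_disabling(int old, int new, int threshold)
	{
		return old >= threshold && new < threshold;
	}

	static bool is_enabling(int old, int new, int threshold)
	{
		return old < threshold && new >= threshold;
	}
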
@@ -2097,33 +2096,33 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
static void vlv_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void i965_update_wm(struct drm_i915_private *dev_priv)
+static void i965_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
int srwm = 1;
@@ -2131,7 +2130,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
bool cxsr_enabled;
/* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
@@ -2152,7 +2151,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d, wm: %d\n",
entries, srwm);
@@ -2167,7 +2166,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
@@ -2175,39 +2174,38 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
} else {
cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
srwm);
/* 965 has limitations... */
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(srwm, SR) |
- FW_WM(8, CURSORB) |
- FW_WM(8, PLANEB) |
- FW_WM(8, PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM(8, CURSORA) |
- FW_WM(8, PLANEC_OLD));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(cursor_sr, CURSOR_SR));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}
#undef FW_WM
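
To put numbers on the self-refresh path in i965_update_wm(), an illustrative walk-through with assumed values (1920x1080@60: pixel_rate = 148500 kHz, htotal = 2200, cpp = 4, plus the 12000 ns latency shown above and, assumed here, a 512-entry FIFO with 64-byte cachelines):

	/*
	 * latency in 0.1us units: 12000 ns -> 120
	 *   lines   = (120 * 148500) / (2200 * 10000) = 0, +1 in flight = 1
	 *   bytes   = 1 * 1920 * 4                    = 7680
	 *   entries = DIV_ROUND_UP(7680, 64)          = 120 cachelines
	 *   srwm    = 512 - 120                       = 392
	 */
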
-static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
+static struct intel_crtc *intel_crtc_for_plane(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- struct intel_display *display = &i915->display;
struct intel_plane *plane;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
if (plane->id == PLANE_PRIMARY &&
plane->i9xx_plane == i9xx_plane)
return intel_crtc_for_pipe(display, plane->pipe);
@@ -2216,7 +2214,7 @@ static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
return NULL;
}
-static void i9xx_update_wm(struct drm_i915_private *dev_priv)
+static void i9xx_update_wm(struct intel_display *display)
{
const struct intel_watermark_params *wm_info;
u32 fwater_lo;
@@ -2226,29 +2224,29 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int planea_wm, planeb_wm;
struct intel_crtc *crtc;
- if (IS_I945GM(dev_priv))
+ if (display->platform.i945gm)
wm_info = &i945_wm_info;
- else if (DISPLAY_VER(dev_priv) != 2)
+ else if (DISPLAY_VER(display) != 2)
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_A);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_A);
+ crtc = intel_crtc_for_plane(display, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2257,25 +2255,25 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planea_wm = wm_info->max_wm;
}
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
wm_info = &i830_bc_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_B);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_B);
+ crtc = intel_crtc_for_plane(display, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planeb_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2284,11 +2282,11 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planeb_wm = wm_info->max_wm;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
- crtc = single_enabled_crtc(dev_priv);
- if (IS_I915GM(dev_priv) && crtc) {
+ crtc = single_enabled_crtc(display);
+ if (display->platform.i915gm && crtc) {
struct drm_gem_object *obj;
obj = intel_fb_bo(crtc->base.primary->state->fb);
@@ -2304,10 +2302,10 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev_priv) && crtc) {
+ if (HAS_FW_BLC(display) && crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *pipe_mode =
@@ -2320,7 +2318,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int cpp;
int entries;
- if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (display->platform.i915gm || display->platform.i945gm)
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2328,20 +2326,20 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ if (display->platform.i945g || display->platform.i945gm)
+ intel_de_write(display, FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
+ intel_de_write(display, FW_BLC_SELF, srwm & 0x3f);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
@@ -2352,34 +2350,34 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
- intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
+ intel_de_write(display, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC2, fwater_hi);
if (crtc)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}
-static void i845_update_wm(struct drm_i915_private *dev_priv)
+static void i845_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
u32 fwater_lo;
int planea_wm;
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc == NULL)
return;
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
&i845_wm_info,
- i845_get_fifo_size(dev_priv, PLANE_A),
+ i845_get_fifo_size(display, PLANE_A),
4, pessimal_latency_ns);
- fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
+ fwater_lo = intel_de_read(display, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d\n", planea_wm);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC, fwater_lo);
}
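
The legacy paths above all funnel through intel_calculate_wm(), a "method 1" computation: how many FIFO entries drain while one memory request is outstanding, later subtracted from the FIFO size. A sketch under this file's conventions (pixel rate in kHz, latency in 0.1 us units; the name is hypothetical):

	static unsigned int wm_method1(unsigned int pixel_rate, /* kHz */
				       unsigned int cpp,        /* bytes/px */
				       unsigned int latency)    /* 0.1 us */
	{
		/*
		 * kHz * bytes * 0.1us collapses to a divide by 10000;
		 * the real helper widens to u64 first to avoid overflow.
		 */
		return DIV_ROUND_UP(pixel_rate * cpp * latency, 10000);
	}
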
/* latency must be in 0.1us units. */
@@ -2534,24 +2532,24 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
}
static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
+ilk_display_fifo_size(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 3072;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
return 768;
else
return 512;
}
static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ilk_plane_wm_reg_max(struct intel_display *display,
int level, bool is_sprite)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -2563,30 +2561,30 @@ ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
}
static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
+ilk_cursor_wm_reg_max(struct intel_display *display, int level)
{
- if (DISPLAY_VER(dev_priv) >= 7)
+ if (DISPLAY_VER(display) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
+static unsigned int ilk_fbc_wm_reg_max(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 31;
else
return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_plane_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
+ unsigned int fifo_size = ilk_display_fifo_size(display);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -2594,14 +2592,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_NUM_PIPES(dev_priv);
+ fifo_size /= INTEL_NUM_PIPES(display);
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (DISPLAY_VER(dev_priv) < 7)
+ if (DISPLAY_VER(display) < 7)
fifo_size /= 2;
}
@@ -2617,11 +2615,11 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(display, level, is_sprite));
}
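
An assumed SNB example makes the fifo_size plumbing above concrete: with a 512-entry FIFO, both pipes active and level 0, the budget is split per pipe, halved again by the ILK/SNB non-self-refresh rule, and finally clamped by ilk_plane_wm_reg_max():

	/*
	 * 512 / 2 pipes                          = 256
	 * 256 / 2 (non-SR FIFO half on ILK/SNB)  = 128
	 * min(128, level-0 register max)         -> clamped result
	 */
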
/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_cursor_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config)
{
@@ -2630,32 +2628,32 @@ static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev_priv, level);
+ return ilk_cursor_wm_reg_max(display, level);
}
-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_maximums(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev_priv, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_max(display, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(display, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(display, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}
-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_reg_maximums(struct intel_display *display,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
- max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_reg_max(display, level, false);
+ max->spr = ilk_plane_wm_reg_max(display, level, true);
+ max->cur = ilk_cursor_wm_reg_max(display, level);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}
-static bool ilk_validate_wm_level(struct drm_i915_private *i915,
+static bool ilk_validate_wm_level(struct intel_display *display,
int level,
const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
@@ -2679,15 +2677,15 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
*/
if (level == 0 && !result->enable) {
if (result->pri_val > max->pri)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Primary WM%d too large %u (max %u)\n",
level, result->pri_val, max->pri);
if (result->spr_val > max->spr)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Sprite WM%d too large %u (max %u)\n",
level, result->spr_val, max->spr);
if (result->cur_val > max->cur)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
@@ -2700,7 +2698,7 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
return ret;
}
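
The debug messages above imply the rule ilk_validate_wm_level() enforces: a level stays enabled only when all three computed values fit their register maxima. As a sketch of the check (reconstructed, not quoted from the hunk):

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;
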
-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_level(struct intel_display *display,
const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
@@ -2709,9 +2707,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- u16 pri_latency = dev_priv->display.wm.pri_latency[level];
- u16 spr_latency = dev_priv->display.wm.spr_latency[level];
- u16 cur_latency = dev_priv->display.wm.cur_latency[level];
+ u16 pri_latency = display->wm.pri_latency[level];
+ u16 spr_latency = display->wm.spr_latency[level];
+ u16 cur_latency = display->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2735,11 +2733,12 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void hsw_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u64 sskpd;
- i915->display.wm.num_levels = 5;
+ display->wm.num_levels = 5;
sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
@@ -2752,11 +2751,12 @@ static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
}
-static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void snb_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 sskpd;
- i915->display.wm.num_levels = 4;
+ display->wm.num_levels = 4;
sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
@@ -2766,11 +2766,12 @@ static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
}
-static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void ilk_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 mltr;
- i915->display.wm.num_levels = 3;
+ display->wm.num_levels = 3;
mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
@@ -2780,24 +2781,21 @@ static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_spr_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_cur_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5], u16 min)
+static bool ilk_increase_wm_latency(struct intel_display *display, u16 wm[5], u16 min)
{
int level;
@@ -2805,13 +2803,13 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
return false;
wm[0] = max(wm[0], min);
- for (level = 1; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 1; level < display->wm.num_levels; level++)
wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
}
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_latency_quirk(struct intel_display *display)
{
bool changed;
@@ -2819,21 +2817,21 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(display, display->wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.cur_latency, 12);
if (!changed)
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}
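
Concretely, min = 12 here means 1.2 us: WM0 latencies are kept in 0.1 us units, so wm[0] is raised to at least 12, while the WM1+ entries (stored in 0.5 us units, per the comment in ilk_compute_wm_level()) are raised to DIV_ROUND_UP(12, 5) = 3, i.e. at least 1.5 us:

	/*
	 * wm[0]   -> max(wm[0], 12)  [0.1us units, >= 1.2us]
	 * wm[1..] -> max(wm[l], 3)   [0.5us units, >= 1.5us]
	 */
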
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_lp3_irq_quirk(struct intel_display *display)
{
/*
* On some SNB machines (Thinkpad X220 Tablet at least)
@@ -2846,50 +2844,50 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
* interrupts only. To play it safe we disable LP3
* watermarks entirely.
*/
- if (dev_priv->display.wm.pri_latency[3] == 0 &&
- dev_priv->display.wm.spr_latency[3] == 0 &&
- dev_priv->display.wm.cur_latency[3] == 0)
+ if (display->wm.pri_latency[3] == 0 &&
+ display->wm.spr_latency[3] == 0 &&
+ display->wm.cur_latency[3] == 0)
return;
- dev_priv->display.wm.pri_latency[3] = 0;
- dev_priv->display.wm.spr_latency[3] = 0;
- dev_priv->display.wm.cur_latency[3] = 0;
+ display->wm.pri_latency[3] = 0;
+ display->wm.spr_latency[3] = 0;
+ display->wm.cur_latency[3] = 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}
-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void ilk_setup_wm_latency(struct intel_display *display)
{
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else if (DISPLAY_VER(dev_priv) >= 6)
- snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ if (display->platform.broadwell || display->platform.haswell)
+ hsw_read_wm_latency(display, display->wm.pri_latency);
+ else if (DISPLAY_VER(display) >= 6)
+ snb_read_wm_latency(display, display->wm.pri_latency);
else
- ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ ilk_read_wm_latency(display, display->wm.pri_latency);
- memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
- memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(display->wm.spr_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
+ memcpy(display->wm.cur_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
+ intel_fixup_spr_wm_latency(display, display->wm.spr_latency);
+ intel_fixup_cur_wm_latency(display, display->wm.cur_latency);
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
- if (DISPLAY_VER(dev_priv) == 6) {
- snb_wm_latency_quirk(dev_priv);
- snb_wm_lp3_irq_quirk(dev_priv);
+ if (DISPLAY_VER(display) == 6) {
+ snb_wm_latency_quirk(display);
+ snb_wm_lp3_irq_quirk(display);
}
}
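
The net result is three parallel per-level latency tables seeded from the primary values (sketch of the layout; SNB shown, where num_levels = 4):

	/*
	 * pri_latency[0..3]  read from MCH_SSKPD
	 * spr_latency[0..3]  copy of pri_latency, then ILK LP0 fixup
	 * cur_latency[0..3]  copy of pri_latency, then ILK LP0 fixup
	 * ...followed by the two SNB quirks when display ver == 6.
	 */
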
-static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
+static bool ilk_validate_pipe_wm(struct intel_display *display,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@@ -2901,11 +2899,11 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_compute_wm_maximums(display, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
- if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
- drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
+ if (!ilk_validate_wm_level(display, 0, &max, &pipe_wm->wm[0])) {
+ drm_dbg_kms(display->drm, "LP0 watermark invalid\n");
return false;
}
@@ -2916,7 +2914,7 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_pipe_wm *pipe_wm;
@@ -2943,10 +2941,10 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
- usable_level = dev_priv->display.wm.num_levels - 1;
+ usable_level = display->wm.num_levels - 1;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
+ if (DISPLAY_VER(display) < 7 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2954,18 +2952,18 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
+ ilk_compute_wm_level(display, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
- if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+ if (!ilk_validate_pipe_wm(display, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+ ilk_compute_wm_reg_maximums(display, 1, &max);
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
+ ilk_compute_wm_level(display, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -2973,7 +2971,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
* register maximums since such watermarks are
* always invalid.
*/
- if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
+ if (!ilk_validate_wm_level(display, level, &max, wm)) {
memset(wm, 0, sizeof(*wm));
break;
}
@@ -2990,7 +2988,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -3015,7 +3013,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->sprites_enabled |= active->sprites_enabled;
intermediate->sprites_scaled |= active->sprites_scaled;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct intel_wm_level *intermediate_wm = &intermediate->wm[level];
const struct intel_wm_level *active_wm = &active->wm[level];
@@ -3036,7 +3034,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
- if (!ilk_validate_pipe_wm(dev_priv, intermediate))
+ if (!ilk_validate_pipe_wm(display, intermediate))
return -EINVAL;
/*
@@ -3068,7 +3066,7 @@ static int ilk_compute_watermarks(struct intel_atomic_state *state,
/*
* Merge the watermarks from all active pipes for a specific level.
*/
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_merge_wm_level(struct intel_display *display,
int level,
struct intel_wm_level *ret_wm)
{
@@ -3076,7 +3074,7 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
ret_wm->enable = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
@@ -3101,31 +3099,31 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
/*
* Merge all low power watermarks for all active pipes.
*/
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+static void ilk_wm_merge(struct intel_display *display,
const struct intel_wm_config *config,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- int level, num_levels = dev_priv->display.wm.num_levels;
+ int level, num_levels = display->wm.num_levels;
int last_enabled_level = num_levels - 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
+ if ((DISPLAY_VER(display) < 7 || display->platform.ivybridge) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
+ merged->fbc_wm_enabled = DISPLAY_VER(display) >= 6;
/* merge each WM1+ level */
for (level = 1; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
- ilk_merge_wm_level(dev_priv, level, wm);
+ ilk_merge_wm_level(display, level, wm);
if (level > last_enabled_level)
wm->enable = false;
- else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
+ else if (!ilk_validate_wm_level(display, level, max, wm))
/* make sure all following levels get disabled */
last_enabled_level = level - 1;
@@ -3141,8 +3139,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
}
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
- if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
+ if (DISPLAY_VER(display) == 5 && HAS_FBC(display) &&
+ display->params.enable_fbc && !merged->fbc_wm_enabled) {
for (level = 2; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3158,16 +3156,16 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
}
/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+static unsigned int ilk_wm_lp_latency(struct intel_display *display,
int level)
{
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (display->platform.haswell || display->platform.broadwell)
return 2 * level;
else
- return dev_priv->display.wm.pri_latency[level];
+ return display->wm.pri_latency[level];
}
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_results(struct intel_display *display,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
@@ -3191,14 +3189,14 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_LATENCY(ilk_wm_lp_latency(display, level)) |
WM_LP_PRIMARY(r->pri_val) |
WM_LP_CURSOR(r->cur_val);
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
else
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
@@ -3209,19 +3207,19 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
- drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
+ if (DISPLAY_VER(display) < 7 && r->spr_val) {
+ drm_WARN_ON(display->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
}
}
/* LP0 register values */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
const struct intel_wm_level *r = &pipe_wm->wm[0];
- if (drm_WARN_ON(&dev_priv->drm, !r->enable))
+ if (drm_WARN_ON(display->drm, !r->enable))
continue;
results->wm_pipe[pipe] =
@@ -3236,13 +3234,13 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* case both are at the same level. Prefer r1 in case they're the same.
*/
static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
+ilk_find_best_result(struct intel_display *display,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
int level, level1 = 0, level2 = 0;
- for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 1; level < display->wm.num_levels; level++) {
if (r1->wm[level].enable)
level1 = level;
if (r2->wm[level].enable)
@@ -3268,7 +3266,7 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+static unsigned int ilk_compute_wm_dirty(struct intel_display *display,
const struct ilk_wm_values *old,
const struct ilk_wm_values *new)
{
@@ -3276,7 +3274,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
enum pipe pipe;
int wm_lp;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
dirty |= WM_DIRTY_PIPE(pipe);
/* Must disable LP1+ watermarks too */
@@ -3314,25 +3312,25 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
return dirty;
}
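
The dirty mask packs one bit per pipe alongside the WM_DIRTY_LP()/WM_DIRTY_FBC/WM_DIRTY_DDB bits partially visible above, letting ilk_write_wm_values() skip registers that did not change. A plausible shape for the per-pipe and LP macros (assumed; only the FBC/DDB definitions appear in this hunk):

	#define WM_DIRTY_PIPE(pipe)	(1 << (pipe))		/* assumed */
	#define WM_DIRTY_LP(wm_lp)	(1 << (14 + (wm_lp)))	/* assumed */
	#define WM_DIRTY_LP_ALL \
		(WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
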
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+static bool _ilk_disable_lp_wm(struct intel_display *display,
unsigned int dirty)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
bool changed = false;
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
previous->wm_lp[2] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
previous->wm_lp[1] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
previous->wm_lp[0] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
@@ -3348,73 +3346,73 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
* The spec says we shouldn't write when we don't need, because every write
* causes WMs to be re-evaluated, expending some power.
*/
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+static void ilk_write_wm_values(struct intel_display *display,
struct ilk_wm_values *results)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
unsigned int dirty;
- dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+ dirty = ilk_compute_wm_dirty(display, previous, results);
if (!dirty)
return;
- _ilk_disable_lp_wm(dev_priv, dirty);
+ _ilk_disable_lp_wm(display, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
if (dirty & WM_DIRTY_PIPE(PIPE_B))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
if (dirty & WM_DIRTY_PIPE(PIPE_C))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- WM_MISC_DATA_PARTITION_5_6);
+ if (display->platform.haswell || display->platform.broadwell)
+ intel_de_rmw(display, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ WM_MISC_DATA_PARTITION_5_6);
else
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- DISP_DATA_PARTITION_5_6);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ DISP_DATA_PARTITION_5_6);
}
if (dirty & WM_DIRTY_FBC)
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
- results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+ intel_de_rmw(display, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+ results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
+ intel_de_write(display, WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (DISPLAY_VER(dev_priv) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
+ intel_de_write(display, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
+ intel_de_write(display, WM3S_LP_IVB, results->wm_lp_spr[2]);
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, results->wm_lp[0]);
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, results->wm_lp[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, results->wm_lp[2]);
- dev_priv->display.wm.hw = *results;
+ display->wm.hw = *results;
}
-bool ilk_disable_cxsr(struct drm_i915_private *dev_priv)
+bool ilk_disable_cxsr(struct intel_display *display)
{
- return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+ return _ilk_disable_lp_wm(display, WM_DIRTY_LP_ALL);
}
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_config(struct intel_display *display,
struct intel_wm_config *config)
{
struct intel_crtc *crtc;
/* Compute the currently _active_ config */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
@@ -3426,7 +3424,7 @@ static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
}
}
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_program_watermarks(struct intel_display *display)
{
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
@@ -3434,18 +3432,18 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_config(dev_priv, &config);
+ ilk_compute_wm_config(display, &config);
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (DISPLAY_VER(dev_priv) >= 7 &&
+ if (DISPLAY_VER(display) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_5_6);
- best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(display, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -3453,50 +3451,49 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(display, best_lp_wm, partitioning, &results);
- ilk_write_wm_values(dev_priv, &results);
+ ilk_write_wm_values(display, &results);
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
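
Taken together, the two hooks implement the usual two-phase watermark update: intermediate values are safe for both the old and the new plane configuration, so they go in before the plane update, and the optimal values follow only after the vblank that latches the new state (and only when need_postvbl_update is set). A call-order sketch with a hypothetical caller:

	/*
	 * ilk_initial_watermarks(state, crtc);   // pre-update: intermediate
	 * ... commit planes, wait for vblank ...
	 * ilk_optimize_watermarks(state, crtc);  // post-vblank: optimal
	 */
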
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_display *display = to_intel_display(crtc);
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
- hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
+ hw->wm_pipe[pipe] = intel_de_read(display, WM0_PIPE_ILK(pipe));
memset(active, 0, sizeof(*active));
@@ -3523,7 +3520,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
* should be marked as enabled but zeroed,
* which is what we'd compute them to.
*/
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
active->wm[level].enable = true;
}
@@ -3572,7 +3569,7 @@ static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
* through the atomic check code to calculate new watermark values in the
* state object.
*/
-void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+void ilk_wm_sanitize(struct intel_display *display)
{
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
@@ -3583,14 +3580,14 @@ void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ if (!display->funcs.wm->optimize_watermarks)
return;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 9))
return;
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
+ state = drm_atomic_state_alloc(display->drm);
+ if (drm_WARN_ON(display->drm, !state))
return;
intel_state = to_intel_atomic_state(state);
@@ -3606,14 +3603,14 @@ retry:
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(display))
intel_state->skip_intermediate_wm = true;
ret = ilk_sanitize_watermarks_add_affected(state);
if (ret)
goto fail;
- ret = intel_atomic_check(&dev_priv->drm, state);
+ ret = intel_atomic_check(display->drm, state);
if (ret)
goto fail;
@@ -3643,7 +3640,7 @@ fail:
* If this actually happens, we'll have to just leave the
* BIOS-programmed watermarks untouched and hope for the best.
*/
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Could not determine valid watermarks for inherited state\n");
drm_atomic_state_put(state);
@@ -3657,18 +3654,18 @@ fail:
#define _FW_WM_VLV(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_read_wm_values(struct intel_display *display,
struct g4x_wm_values *wm)
{
u32 tmp;
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
wm->sr.fbc = _FW_WM(tmp, FBC_SR);
wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -3676,21 +3673,21 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_read_wm_values(struct intel_display *display,
struct vlv_wm_values *wm)
{
enum pipe pipe;
u32 tmp;
- for_each_pipe(dev_priv, pipe) {
- tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
+ for_each_pipe(display, pipe) {
+ tmp = intel_de_read(display, VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -3702,34 +3699,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
- if (IS_CHERRYVIEW(dev_priv)) {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
+ if (display->platform.cherryview) {
+ tmp = intel_de_read(display, DSPFW7_CHV);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
+ tmp = intel_de_read(display, DSPFW8_CHV);
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
+ tmp = intel_de_read(display, DSPFW9_CHV);
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -3741,11 +3738,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
+ tmp = intel_de_read(display, DSPFW7);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -3759,16 +3756,16 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
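
For reference, the extraction above is plain token pasting: with the _FW_WM_VLV() definition shown before g4x_read_wm_values(), the PLANEB read expands to a mask-and-shift, and the DSPHOWM pass splices the high bits back on top:

	/* _FW_WM_VLV(tmp, PLANEB) expands to: */
	(((tmp) & DSPFW_PLANEB_MASK_VLV) >> DSPFW_PLANEB_SHIFT)

	/* while e.g. wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9 restores bits 9+ */
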
-static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void g4x_wm_get_hw_state(struct intel_display *display)
{
- struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *wm = &display->wm.g4x;
struct intel_crtc *crtc;
- g4x_read_wm_values(dev_priv, wm);
+ g4x_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -3833,7 +3830,7 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.g4x.optimal = *active;
crtc_state->wm.g4x.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3841,26 +3838,25 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE0]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ drm_dbg_kms(display->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
str_yes_no(wm->fbc_en));
}
-static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+static void g4x_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -3873,7 +3869,7 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.g4x.raw[level];
@@ -3884,36 +3880,37 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _g4x_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.g4x.intermediate =
crtc_state->wm.g4x.optimal;
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
}
- g4x_program_watermarks(dev_priv);
+ g4x_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void vlv_wm_get_hw_state(struct intel_display *display)
{
- struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
- vlv_read_wm_values(dev_priv, wm);
+ vlv_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -3935,10 +3932,10 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
@@ -3948,7 +3945,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
vlv_punit_put(dev_priv);
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -3988,7 +3985,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.vlv.optimal = *active;
crtc_state->wm.vlv.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3997,20 +3994,19 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE1]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
-static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+static void vlv_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -4023,7 +4019,7 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
@@ -4031,33 +4027,33 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _vlv_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.vlv.intermediate =
crtc_state->wm.vlv.optimal;
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
}
- vlv_program_watermarks(dev_priv);
+ vlv_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
/*
* FIXME should probably kill this and improve
* the real watermark readout/sanitation instead
*/
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_init_lp_watermarks(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM3_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM2_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM1_LP_ILK, WM_LP_ENABLE, 0);
/*
* Don't touch WM_LP_SPRITE_ENABLE here.
@@ -4065,37 +4061,37 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
-static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void ilk_wm_get_hw_state(struct intel_display *display)
{
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc *crtc;
- ilk_init_lp_watermarks(dev_priv);
+ ilk_init_lp_watermarks(display);
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
- hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
- hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
- hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
+ hw->wm_lp[0] = intel_de_read(display, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_de_read(display, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_de_read(display, WM3_LP_ILK);
- hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
- if (DISPLAY_VER(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
- hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
+ hw->wm_lp_spr[0] = intel_de_read(display, WM1S_LP_ILK);
+ if (DISPLAY_VER(display) >= 7) {
+ hw->wm_lp_spr[1] = intel_de_read(display, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_de_read(display, WM3S_LP_IVB);
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+ if (display->platform.haswell || display->platform.broadwell)
+ hw->partitioning = (intel_de_read(display, WM_MISC) &
WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+ else if (display->platform.ivybridge)
+ hw->partitioning = (intel_de_read(display, DISP_ARB_CTL2) &
DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
- !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+ !(intel_de_read(display, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
static const struct intel_wm_funcs ilk_wm_funcs = {
@@ -4145,39 +4141,41 @@ static const struct intel_wm_funcs i845_wm_funcs = {
static const struct intel_wm_funcs nop_funcs = {
};
-void i9xx_wm_init(struct drm_i915_private *dev_priv)
+void i9xx_wm_init(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &ilk_wm_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &vlv_wm_funcs;
- } else if (IS_G4X(dev_priv)) {
- g4x_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &g4x_wm_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
- if (!pnv_get_cxsr_latency(dev_priv)) {
- drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ ilk_setup_wm_latency(display);
+ display->funcs.wm = &ilk_wm_funcs;
+ } else if (display->platform.valleyview || display->platform.cherryview) {
+ vlv_setup_wm_latency(display);
+ display->funcs.wm = &vlv_wm_funcs;
+ } else if (display->platform.g4x) {
+ g4x_setup_wm_latency(display);
+ display->funcs.wm = &g4x_wm_funcs;
+ } else if (display->platform.pineview) {
+ if (!pnv_get_cxsr_latency(display)) {
+ drm_info(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
/* Disable CxSR and never update its watermark again */
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.funcs.wm = &nop_funcs;
+ intel_set_memory_cxsr(display, false);
+ display->funcs.wm = &nop_funcs;
} else {
- dev_priv->display.funcs.wm = &pnv_wm_funcs;
+ display->funcs.wm = &pnv_wm_funcs;
}
- } else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.funcs.wm = &i965_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->display.funcs.wm = &i845_wm_funcs;
+ } else if (DISPLAY_VER(display) == 4) {
+ display->funcs.wm = &i965_wm_funcs;
+ } else if (DISPLAY_VER(display) == 3) {
+ display->funcs.wm = &i9xx_wm_funcs;
+ } else if (DISPLAY_VER(display) == 2) {
+ if (INTEL_NUM_PIPES(display) == 1)
+ display->funcs.wm = &i845_wm_funcs;
else
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ display->funcs.wm = &i9xx_wm_funcs;
} else {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"unexpected fall-through in %s\n", __func__);
- dev_priv->display.funcs.wm = &nop_funcs;
+ display->funcs.wm = &nop_funcs;
}
}
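
The hunk above settles the calling convention this series converges on: display code takes struct intel_display, and struct drm_i915_private is derived only where hardware-global data (PCH type, punit, DRAM info) still lives. A minimal standalone sketch of that layering follows; the struct members and the cast-based to_i915() here are stand-ins, not the real i915 definitions, which go through container_of():

#include <stdio.h>

struct drm_device { void *dev_private; };
struct drm_i915_private { struct drm_device drm; int dram_channels; };
struct intel_display { struct drm_device *drm; int num_pipes; };

/* Stand-in for to_i915(): recover the driver-private struct from the
 * embedded drm_device. The plain cast only works because drm is the
 * first member. */
static struct drm_i915_private *to_i915(struct drm_device *drm)
{
        return (struct drm_i915_private *)drm;
}

/* Display code takes intel_display; only hardware-global data still
 * needs the upcast to drm_i915_private. */
static void wm_init(struct intel_display *display)
{
        struct drm_i915_private *i915 = to_i915(display->drm);

        printf("pipes=%d channels=%d\n", display->num_pipes,
               i915->dram_channels);
}

int main(void)
{
        struct drm_i915_private i915 = { .dram_channels = 2 };
        struct intel_display display = { .drm = &i915.drm, .num_pipes = 3 };

        wm_init(&display);
        return 0;
}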
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
index 06ac37c6c94b..7bb363b2a756 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.h
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
#ifdef I915
-bool ilk_disable_cxsr(struct drm_i915_private *i915);
-void ilk_wm_sanitize(struct drm_i915_private *i915);
-bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
-void i9xx_wm_init(struct drm_i915_private *i915);
+bool ilk_disable_cxsr(struct intel_display *display);
+void ilk_wm_sanitize(struct intel_display *display);
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable);
+void i9xx_wm_init(struct intel_display *display);
#else
-static inline bool ilk_disable_cxsr(struct drm_i915_private *i915)
+static inline bool ilk_disable_cxsr(struct intel_display *display)
{
return false;
}
-static inline void ilk_wm_sanitize(struct drm_i915_private *i915)
+static inline void ilk_wm_sanitize(struct intel_display *display)
{
}
-static inline bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable)
+static inline bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
return false;
}
-static inline void i9xx_wm_init(struct drm_i915_private *i915)
+static inline void i9xx_wm_init(struct intel_display *display)
{
}
#endif
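
The point of the #else branch above is that code shared between build configurations can call these functions unconditionally, with the stubs compiled in when the feature is out. A small sketch of the same convention under a made-up HAVE_LEGACY_WM guard (the real I915/xe split is more involved):

#include <stdbool.h>
#include <stdio.h>

struct intel_display { int id; };

#ifdef HAVE_LEGACY_WM
bool legacy_wm_disable_cxsr(struct intel_display *display);
#else
/* Inline no-op stubs keep call sites free of #ifdefs; this program
 * builds and runs with HAVE_LEGACY_WM undefined. */
static inline bool legacy_wm_disable_cxsr(struct intel_display *display)
{
        return false;
}
#endif

int main(void)
{
        struct intel_display display = { .id = 0 };

        printf("cxsr disabled: %d\n", legacy_wm_disable_cxsr(&display));
        return 0;
}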
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 402b7b2e1829..ca7033251e91 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
@@ -1826,107 +1827,56 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
.transfer = gen11_dsi_host_transfer,
};
-#define ICL_PREPARE_CNT_MAX 0x7
-#define ICL_CLK_ZERO_CNT_MAX 0xf
-#define ICL_TRAIL_CNT_MAX 0x7
-#define ICL_TCLK_PRE_CNT_MAX 0x3
-#define ICL_TCLK_POST_CNT_MAX 0x7
-#define ICL_HS_ZERO_CNT_MAX 0xf
-#define ICL_EXIT_ZERO_CNT_MAX 0x7
-
static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns;
- u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
- u32 ths_prepare_ns, tclk_trail_ns;
- u32 hs_zero_cnt;
- u32 tclk_pre_cnt;
+ u32 tclk_prepare_esc_clk, tclk_zero_esc_clk, tclk_pre_esc_clk;
+ u32 ths_prepare_esc_clk, ths_zero_esc_clk, ths_exit_esc_clk;
tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
- tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- ths_prepare_ns = max(mipi_config->ths_prepare,
- mipi_config->tclk_prepare);
-
/*
- * prepare cnt in escape clocks
- * this field represents a hexadecimal value with a precision
- * of 1.2 – i.e. the most significant bit is the integer
- * and the least significant 2 bits are fraction bits.
- * so, the field can represent a range of 0.25 to 1.75
+ * The clock and data lane prepare timing parameters are expressed in
+ * units of 1/4 escape clocks, and all the other timing parameters in
+ * escape clocks.
*/
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
- if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n",
- prepare_cnt);
- prepare_cnt = ICL_PREPARE_CNT_MAX;
- }
+ tclk_prepare_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare * 4, tlpx_ns);
+ tclk_prepare_esc_clk = min(tclk_prepare_esc_clk, 7);
- /* clk zero count in escape clocks */
- clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
- ths_prepare_ns, tlpx_ns);
- if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
- clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
- }
+ tclk_zero_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ mipi_config->tclk_prepare, tlpx_ns);
+ tclk_zero_esc_clk = min(tclk_zero_esc_clk, 15);
- /* trail cnt in escape clocks*/
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
- if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n",
- trail_cnt);
- trail_cnt = ICL_TRAIL_CNT_MAX;
- }
+ tclk_pre_esc_clk = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ tclk_pre_esc_clk = min(tclk_pre_esc_clk, 3);
- /* tclk pre count in escape clocks */
- tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
- if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
- tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
- }
+ ths_prepare_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare * 4, tlpx_ns);
+ ths_prepare_esc_clk = min(ths_prepare_esc_clk, 7);
- /* hs zero cnt in escape clocks */
- hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
- ths_prepare_ns, tlpx_ns);
- if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n",
- hs_zero_cnt);
- hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
- }
+ ths_zero_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ mipi_config->ths_prepare, tlpx_ns);
+ ths_zero_esc_clk = min(ths_zero_esc_clk, 15);
- /* hs exit zero cnt in escape clocks */
- exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
- if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "exit_zero_cnt out of range (%d)\n",
- exit_zero_cnt);
- exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
- }
+ ths_exit_esc_clk = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ ths_exit_esc_clk = min(ths_exit_esc_clk, 7);
/* clock lane dphy timings */
intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
- CLK_PREPARE(prepare_cnt) |
+ CLK_PREPARE(tclk_prepare_esc_clk) |
CLK_ZERO_OVERRIDE |
- CLK_ZERO(clk_zero_cnt) |
+ CLK_ZERO(tclk_zero_esc_clk) |
CLK_PRE_OVERRIDE |
- CLK_PRE(tclk_pre_cnt) |
- CLK_TRAIL_OVERRIDE |
- CLK_TRAIL(trail_cnt));
+ CLK_PRE(tclk_pre_esc_clk));
/* data lanes dphy timings */
intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
- HS_PREPARE(prepare_cnt) |
+ HS_PREPARE(ths_prepare_esc_clk) |
HS_ZERO_OVERRIDE |
- HS_ZERO(hs_zero_cnt) |
- HS_TRAIL_OVERRIDE |
- HS_TRAIL(trail_cnt) |
+ HS_ZERO(ths_zero_esc_clk) |
HS_EXIT_OVERRIDE |
- HS_EXIT(exit_zero_cnt));
+ HS_EXIT(ths_exit_esc_clk));
intel_dsi_log_params(intel_dsi);
}
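
To make the new arithmetic concrete: prepare values are converted at four counts per escape clock and clamped to their 3-bit field, while the other timings are converted at one count per escape clock. A standalone worked example with invented VBT timings (62 ns tLPX, 65 ns tCLK-PREPARE, 120 ns tHS-EXIT):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned int tlpx_ns = 62;      /* one escape clock period */
        unsigned int tclk_prepare = 65; /* ns, hypothetical VBT value */
        unsigned int ths_exit = 120;    /* ns, hypothetical VBT value */

        /* prepare counts are in 1/4 escape clocks, clamped to 3 bits */
        unsigned int tclk_prepare_esc_clk =
                MIN(DIV_ROUND_UP(tclk_prepare * 4, tlpx_ns), 7u);
        /* the remaining timings are in whole escape clocks */
        unsigned int ths_exit_esc_clk =
                MIN(DIV_ROUND_UP(ths_exit, tlpx_ns), 7u);

        /* 65*4/62 -> ceil(4.19) = 5; 120/62 -> ceil(1.94) = 2 */
        printf("tclk_prepare=%u ths_exit=%u\n",
               tclk_prepare_esc_clk, ths_exit_esc_clk);
        return 0;
}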
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 03dc54c802d3..e83feca5c9c9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -33,16 +33,17 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_cdclk.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp_tunnel.h"
+#include "intel_fb.h"
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
-#include "intel_fb.h"
#include "skl_universal_plane.h"
/**
@@ -59,17 +60,16 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
struct drm_property *property,
u64 *val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio)
+ if (property == display->properties.force_audio)
*val = intel_conn_state->force_audio;
- else if (property == dev_priv->display.properties.broadcast_rgb)
+ else if (property == display->properties.broadcast_rgb)
*val = intel_conn_state->broadcast_rgb;
else {
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
@@ -92,22 +92,21 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_property *property,
u64 val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio) {
+ if (property == display->properties.force_audio) {
intel_conn_state->force_audio = val;
return 0;
}
- if (property == dev_priv->display.properties.broadcast_rgb) {
+ if (property == display->properties.broadcast_rgb) {
intel_conn_state->broadcast_rgb = val;
return 0;
}
- drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+ drm_dbg_atomic(display->drm, "Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
}
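
Both property hunks dispatch by comparing the incoming property against pointers now stored on struct intel_display. A compact sketch of that pointer-identity matching, with stand-in types (the real drm_property machinery is richer):

#include <stdio.h>

struct drm_property { int id; };

struct intel_display {
        struct { struct drm_property *force_audio, *broadcast_rgb; } properties;
};

static int get_property(struct intel_display *display,
                        const struct drm_property *property,
                        unsigned long long *val)
{
        /* Properties are matched by pointer, not by name or id. */
        if (property == display->properties.force_audio)
                *val = 1;
        else if (property == display->properties.broadcast_rgb)
                *val = 2;
        else
                return -22; /* -EINVAL */
        return 0;
}

int main(void)
{
        struct drm_property fa = { .id = 10 }, br = { .id = 11 };
        struct intel_display display = { .properties = { &fa, &br } };
        unsigned long long val;

        if (!get_property(&display, &br, &val))
                printf("broadcast_rgb -> %llu\n", val);
        return 0;
}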
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 178dc6c8de80..4f3fa966c537 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -16,6 +16,7 @@
#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
#include "intel_dsi_dcs_backlight.h"
@@ -901,11 +902,9 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- intel_wakeref_t wakeref;
int ret = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ with_intel_display_rpm(display) {
u32 hw_level;
drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
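
with_intel_display_rpm() replaces the explicit wakeref bookkeeping with a scoped guard. A rough standalone sketch of how such a guard can be built from a for loop; the names and the get/put pair are stand-ins, and unlike this naive version (where a bare break would skip the put) the real macro keeps the reference balanced:

#include <stdio.h>

struct intel_display { int rpm_refs; };

static int rpm_get(struct intel_display *d) { d->rpm_refs++; return 1; }
static void rpm_put(struct intel_display *d) { d->rpm_refs--; }

/* Runs the body once, taking the reference before and dropping it
 * after the body completes. */
#define with_display_rpm(d) \
        for (int _once = rpm_get(d); _once; rpm_put(d), _once = 0)

int main(void)
{
        struct intel_display display = { 0 };

        with_display_rpm(&display)
                printf("refs inside: %d\n", display.rpm_refs); /* 1 */

        printf("refs outside: %d\n", display.rpm_refs); /* 0 */
        return 0;
}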
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a8d08d7d82b3..fabfcf2caa69 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -37,6 +37,7 @@
#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
@@ -3115,7 +3116,6 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
{
struct drm_i915_private *i915 = to_i915(display->drm);
const struct vbt_header *vbt = NULL;
- intel_wakeref_t wakeref;
vbt = firmware_get_vbt(display, sizep);
@@ -3127,11 +3127,11 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
* through MMIO or PCI mapping
*/
if (!vbt && IS_DGFX(i915))
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash");
if (!vbt)
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM");
return vbt;
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 048be2872247..a5dd2932b852 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -39,14 +39,15 @@ struct intel_qgv_info {
u8 deinterleave;
};
-static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 dclk_ratio, dclk_reference;
u32 val;
- val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
+ val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
if (val & DG1_QCLK_REFERENCE)
dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
@@ -54,18 +55,18 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
- val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
if (val & DG1_GEAR_TYPE)
sp->dclk *= 2;
if (sp->dclk == 0)
return -EINVAL;
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);
@@ -74,22 +75,23 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
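
The dclk derivation above folds a 16.666 MHz reference clock into fixed-point kHz before rounding to MHz. A worked example with made-up register fields (ratio 16, 100 MHz reference, gear 2):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int dclk_ratio = 16;    /* hypothetical field value */
        unsigned int dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
        int gear2 = 1;                   /* hypothetical DG1_GEAR_TYPE */

        /* 16667 kHz * ratio * reference, rounded to MHz */
        unsigned int dclk =
                DIV_ROUND_UP(16667 * dclk_ratio * dclk_reference + 500, 1000);

        if (gear2)
                dclk *= 2;

        /* (1600032+500)/1000 -> 1601, doubled -> 3202 */
        printf("dclk = %u MHz\n", dclk);
        return 0;
}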
-static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int icl_pcode_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0, val2 = 0;
u16 dclk;
int ret;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
&val, &val2);
if (ret)
return ret;
dclk = val & 0xffff;
- sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0),
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@@ -102,14 +104,15 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
- struct intel_psf_gv_point *points)
+static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
+ struct intel_psf_gv_point *points)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0;
int ret;
int i;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -122,10 +125,10 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+static u16 icl_qgv_points_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 qgv_points = 0, psf_points = 0;
/*
@@ -142,49 +145,51 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}
-static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
{
- return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+ return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
ICL_PCODE_REQ_QGV_PT_MASK);
}
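
is_sagv_enabled() leans on a bit trick: after masking, a single surviving QGV point is a power of two, and SAGV only counts as enabled when more than one point survives. A standalone illustration, with is_power_of_2() open-coded:

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned int n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
        unsigned int all_points = 0x7;  /* three QGV points, say */

        /* Masking points 1 and 2 leaves only bit 0: a power of two,
         * i.e. one remaining point, so SAGV counts as disabled. */
        unsigned int points_mask = 0x6;
        unsigned int remaining = ~points_mask & all_points;

        printf("remaining=0x%x sagv=%d\n", remaining,
               !is_power_of_2(remaining)); /* 0x1, sagv=0 */

        /* Masking only point 2 leaves two points: SAGV enabled. */
        points_mask = 0x4;
        remaining = ~points_mask & all_points;
        printf("remaining=0x%x sagv=%d\n", remaining,
               !is_power_of_2(remaining)); /* 0x3, sagv=1 */
        return 0;
}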
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+int icl_pcode_restrict_qgv_points(struct intel_display *display,
u32 points_mask)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return 0;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ ret = skl_pcode_request(&i915->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
points_mask,
ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
1);
if (ret < 0) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to disable qgv points (0x%x) points: 0x%x\n",
ret, points_mask);
return ret;
}
- dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+ display->sagv.status = is_sagv_enabled(display, points_mask) ?
I915_SAGV_ENABLED : I915_SAGV_DISABLED;
return 0;
}
-static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int mtl_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp, int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val, val2;
u16 dclk;
- val = intel_uncore_read(&dev_priv->uncore,
+ val = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
- val2 = intel_uncore_read(&dev_priv->uncore,
+ val2 = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
@@ -200,29 +205,30 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
}
static int
-intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+intel_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
- if (DISPLAY_VER(dev_priv) >= 14)
- return mtl_read_qgv_point_info(dev_priv, sp, point);
- else if (IS_DG1(dev_priv))
- return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_read_qgv_point_info(display, sp, point);
+ else if (display->platform.dg1)
+ return dg1_mchbar_read_qgv_point_info(display, sp, point);
else
- return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+ return icl_pcode_read_qgv_point_info(display, sp, point);
}
-static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+static int icl_get_qgv_points(struct intel_display *display,
struct intel_qgv_info *qi,
bool is_y_tile)
{
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct dram_info *dram_info = &i915->dram_info;
int i, ret;
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = 4;
@@ -244,13 +250,14 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = 4;
break;
case INTEL_DRAM_GDDR:
+ case INTEL_DRAM_GDDR_ECC:
qi->channel_width = 32;
break;
default:
MISSING_CASE(dram_info->type);
return -EINVAL;
}
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = is_y_tile ? 8 : 4;
@@ -265,7 +272,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = is_y_tile ? 1 : 2;
break;
case INTEL_DRAM_LPDDR4:
- if (IS_ROCKETLAKE(dev_priv)) {
+ if (display->platform.rocketlake) {
qi->t_bl = 8;
qi->max_numchannels = 4;
qi->channel_width = 32;
@@ -284,39 +291,39 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->max_numchannels = 1;
break;
}
- } else if (DISPLAY_VER(dev_priv) == 11) {
- qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
+ } else if (DISPLAY_VER(display) == 11) {
+ qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
qi->max_numchannels = 1;
}
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
qi->num_points = ARRAY_SIZE(qi->points);
for (i = 0; i < qi->num_points; i++) {
struct intel_qgv_point *sp = &qi->points[i];
- ret = intel_read_qgv_point_info(dev_priv, sp, i);
+ ret = intel_read_qgv_point_info(display, sp, i);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
+ drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
return ret;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
sp->t_rcd, sp->t_rc);
}
if (qi->num_psf_points > 0) {
- ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points);
+ ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
if (ret) {
- drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
+ drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
qi->num_psf_points = 0;
}
for (i = 0; i < qi->num_psf_points; i++)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSF GV %d: CLK=%d \n",
i, qi->psf_points[i].clk);
}
@@ -398,20 +405,34 @@ static const struct intel_sa_info xe2_hpd_sa_info = {
/* Other values not used by simplified algorithm */
};
-static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
+ .derating = 45,
+ .deprogbwlimit = 53,
+ /* Other values not used by simplified algorithm */
+};
+
+static const struct intel_sa_info xe3lpd_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 65, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, i915->dram_info.num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -422,7 +443,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
int clpchgroup;
int j;
@@ -449,7 +470,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->deratedbw[j] = min(maxdebw,
bw * (100 - sa->derating) / 100);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
@@ -460,44 +481,45 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and is pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static int tgl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ const struct dram_info *dram_info = &i915->dram_info;
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw, peakbw;
int clperchgroup;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- if (DISPLAY_VER(dev_priv) < 14 &&
+ if (DISPLAY_VER(display) < 14 &&
(dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
num_channels *= 2;
qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
- if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
+ if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
- if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels)
- drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
+ if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
+ drm_warn(display->drm, "Number of channels exceeds max number of channels.");
if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
@@ -514,7 +536,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
struct intel_bw_info *bi_next;
int clpchgroup;
int j;
@@ -522,7 +544,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
if (i < num_groups - 1) {
- bi_next = &dev_priv->display.bw.max[i + 1];
+ bi_next = &display->bw.max[i + 1];
if (clpchgroup < clperchgroup)
bi_next->num_planes = (ipqdepth - clpchgroup) /
@@ -554,7 +576,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
num_channels *
qi.channel_width, 8);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
i, j, bi->num_planes, bi->deratedbw[j],
bi->peakbw[j]);
@@ -565,7 +587,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / PSF GV %d: num_planes=%d bw=%u\n",
i, j, bi->num_planes, bi->psf_bw[j]);
}
@@ -577,17 +599,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and is pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static void dg2_get_bw_info(struct drm_i915_private *i915)
+static void dg2_get_bw_info(struct intel_display *display)
{
- unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
- int num_groups = ARRAY_SIZE(i915->display.bw.max);
+ unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i;
/*
@@ -598,7 +620,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
* whereas DG2-G11 platforms have 38 GB/s.
*/
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &i915->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
bi->num_planes = 1;
/* Need only one dummy QGV point per group */
@@ -606,20 +628,21 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
bi->deratedbw[0] = deratedbw;
}
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
}
-static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
+static int xe2_hpd_get_bw_info(struct intel_display *display,
const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
int num_channels = i915->dram_info.num_channels;
int peakbw, maxdebw;
int ret, i;
- ret = icl_get_qgv_points(i915, &qi, true);
+ ret = icl_get_qgv_points(display, &qi, true);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -631,33 +654,33 @@ static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
const struct intel_qgv_point *point = &qi.points[i];
int bw = num_channels * (qi.channel_width / 8) * point->dclk;
- i915->display.bw.max[0].deratedbw[i] =
+ display->bw.max[0].deratedbw[i] =
min(maxdebw, (100 - sa->derating) * bw / 100);
- i915->display.bw.max[0].peakbw[i] = bw;
+ display->bw.max[0].peakbw[i] = bw;
- drm_dbg_kms(&i915->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
- i, i915->display.bw.max[0].deratedbw[i],
- i915->display.bw.max[0].peakbw[i]);
+ drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
+ i, display->bw.max[0].deratedbw[i],
+ display->bw.max[0].peakbw[i]);
}
/* Bandwidth does not depend on # of planes; set all groups the same */
- i915->display.bw.max[0].num_planes = 1;
- i915->display.bw.max[0].num_qgv_points = qi.num_points;
- for (i = 1; i < ARRAY_SIZE(i915->display.bw.max); i++)
- memcpy(&i915->display.bw.max[i], &i915->display.bw.max[0],
- sizeof(i915->display.bw.max[0]));
+ display->bw.max[0].num_planes = 1;
+ display->bw.max[0].num_qgv_points = qi.num_points;
+ for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
+ memcpy(&display->bw.max[i], &display->bw.max[0],
+ sizeof(display->bw.max[0]));
/*
* Xe2_HPD should always have exactly two QGV points representing
* battery and plugged-in operation.
*/
- drm_WARN_ON(&i915->drm, qi.num_points != 2);
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ drm_WARN_ON(display->drm, qi.num_points != 2);
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
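
The per-point math above is: peak bandwidth = channels × bytes per channel × dclk, then derated by the SA-info percentage and capped at maxdebw. A worked example with hypothetical numbers (the 45% derating mirrors the new xe2_hpd_ecc_sa_info entry; the rest are invented):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        int num_channels = 4;
        int channel_width = 32;  /* bits */
        int dclk = 2000;         /* MHz, hypothetical QGV point */
        int derating = 45;       /* percent, hypothetical SA info */
        int maxdebw = 500000;    /* cap from SA info, hypothetical */

        int bw = num_channels * (channel_width / 8) * dclk; /* 32000 */
        int deratedbw = MIN(maxdebw, (100 - derating) * bw / 100);

        printf("peak=%d derated=%d\n", bw, deratedbw); /* 32000, 17600 */
        return 0;
}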
-static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int icl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -667,9 +690,9 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
+ for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -685,7 +708,7 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
return UINT_MAX;
}
-static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int tgl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -695,9 +718,9 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -713,52 +736,59 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
return 0;
}
-static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
+static unsigned int adl_psf_bw(struct intel_display *display,
int psf_gv_point)
{
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[0];
+ &display->bw.max[0];
return bi->psf_bw[psf_gv_point];
}
-static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
+static unsigned int icl_qgv_bw(struct intel_display *display,
int num_active_planes, int qgv_point)
{
unsigned int idx;
- if (DISPLAY_VER(i915) >= 12)
- idx = tgl_max_bw_index(i915, num_active_planes, qgv_point);
+ if (DISPLAY_VER(display) >= 12)
+ idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
else
- idx = icl_max_bw_index(i915, num_active_planes, qgv_point);
+ idx = icl_max_bw_index(display, num_active_planes, qgv_point);
- if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ if (idx >= ARRAY_SIZE(display->bw.max))
return 0;
- return i915->display.bw.max[idx].deratedbw[qgv_point];
+ return display->bw.max[idx].deratedbw[qgv_point];
}
-void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+void intel_bw_init_hw(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ const struct dram_info *dram_info = &to_i915(display->drm)->dram_info;
+
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
- xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
- else if (DISPLAY_VER(dev_priv) >= 14)
- tgl_get_bw_info(dev_priv, &mtl_sa_info);
- else if (IS_DG2(dev_priv))
- dg2_get_bw_info(dev_priv);
- else if (IS_ALDERLAKE_P(dev_priv))
- tgl_get_bw_info(dev_priv, &adlp_sa_info);
- else if (IS_ALDERLAKE_S(dev_priv))
- tgl_get_bw_info(dev_priv, &adls_sa_info);
- else if (IS_ROCKETLAKE(dev_priv))
- tgl_get_bw_info(dev_priv, &rkl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 12)
- tgl_get_bw_info(dev_priv, &tgl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 11)
- icl_get_bw_info(dev_priv, &icl_sa_info);
+ if (DISPLAY_VER(display) >= 30)
+ tgl_get_bw_info(display, &xe3lpd_sa_info);
+ else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
+ dram_info->type == INTEL_DRAM_GDDR_ECC)
+ xe2_hpd_get_bw_info(display, &xe2_hpd_ecc_sa_info);
+ else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
+ xe2_hpd_get_bw_info(display, &xe2_hpd_sa_info);
+ else if (DISPLAY_VER(display) >= 14)
+ tgl_get_bw_info(display, &mtl_sa_info);
+ else if (display->platform.dg2)
+ dg2_get_bw_info(display);
+ else if (display->platform.alderlake_p)
+ tgl_get_bw_info(display, &adlp_sa_info);
+ else if (display->platform.alderlake_s)
+ tgl_get_bw_info(display, &adls_sa_info);
+ else if (display->platform.rocketlake)
+ tgl_get_bw_info(display, &rkl_sa_info);
+ else if (DISPLAY_VER(display) == 12)
+ tgl_get_bw_info(display, &tgl_sa_info);
+ else if (DISPLAY_VER(display) == 11)
+ icl_get_bw_info(display, &icl_sa_info);
}
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -772,8 +802,8 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
unsigned int data_rate = 0;
enum plane_id plane_id;
@@ -787,7 +817,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
data_rate += crtc_state->data_rate[plane_id];
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->data_rate_y[plane_id];
}
@@ -795,39 +825,38 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
}
/* "Maximum Pipe Read Bandwidth" */
-static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_bw_crtc_min_cdclk(struct intel_display *display,
+ unsigned int data_rate)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
return 0;
- return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512);
}
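
So "Maximum Pipe Read Bandwidth" boils down to cdclk >= data_rate * 10 / 512, now computed from the bw state's per-pipe data rate instead of a cached min_cdclk[] array. A worked instance with an illustrative rate:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        uint64_t data_rate = 1000000;   /* hypothetical pipe data rate */

        /* min cdclk = ceil(data_rate * 10 / 512) */
        uint64_t min_cdclk = DIV_ROUND_UP_ULL(data_rate * 10, 512);

        printf("min_cdclk = %llu\n",
               (unsigned long long)min_cdclk); /* 19532 */
        return 0;
}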
-static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_num_active_planes(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int num_active_planes = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
num_active_planes += bw_state->num_active_planes[pipe];
return num_active_planes;
}
-static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_data_rate(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int data_rate = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv))
+ if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915))
data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
@@ -836,10 +865,10 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -847,10 +876,10 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -858,27 +887,27 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
return to_intel_bw_state(bw_state);
}
-static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
+static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
int num_active_planes)
{
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int max_bw_point = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_qgv_points; i++) {
unsigned int max_data_rate =
- icl_qgv_bw(i915, num_active_planes, i);
+ icl_qgv_bw(display, num_active_planes, i);
/*
* We need to know which qgv point gives us
@@ -897,23 +926,23 @@ static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
return max_bw_point;
}
-static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915,
+static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
unsigned int qgv_points,
unsigned int psf_points)
{
return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915);
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
}
-static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
+static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
unsigned int max_bw_point_mask = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate > max_bw) {
max_bw_point_mask = BIT(i);
@@ -926,29 +955,29 @@ static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
return max_bw_point_mask;
}
-static void icl_force_disable_sagv(struct drm_i915_private *i915,
+static void icl_force_disable_sagv(struct intel_display *display,
struct intel_bw_state *bw_state)
{
- unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0);
- unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915);
+ unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
+ unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);
- bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
- drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n",
+ drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
bw_state->qgv_points_mask);
- icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask);
+ icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}
-static int mtl_find_qgv_points(struct drm_i915_private *i915,
+static int mtl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
struct intel_bw_state *new_bw_state)
{
unsigned int best_rate = UINT_MAX;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int qgv_peak_bw = 0;
int i;
int ret;
@@ -962,9 +991,9 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
* not enabled. PM Demand code will clamp the value for the register
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
+ if (!intel_can_enable_sagv(display, new_bw_state)) {
new_bw_state->qgv_point_peakbw = U16_MAX;
- drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw.");
+ drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
return 0;
}
@@ -974,27 +1003,27 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
*/
for (i = 0; i < num_qgv_points; i++) {
unsigned int bw_index =
- tgl_max_bw_index(i915, num_active_planes, i);
+ tgl_max_bw_index(display, num_active_planes, i);
unsigned int max_data_rate;
- if (bw_index >= ARRAY_SIZE(i915->display.bw.max))
+ if (bw_index >= ARRAY_SIZE(display->bw.max))
continue;
- max_data_rate = i915->display.bw.max[bw_index].deratedbw[i];
+ max_data_rate = display->bw.max[bw_index].deratedbw[i];
if (max_data_rate < data_rate)
continue;
if (max_data_rate - data_rate < best_rate) {
best_rate = max_data_rate - data_rate;
- qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i];
+ qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
}
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
i, max_data_rate, data_rate, qgv_peak_bw);
}
- drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
+ drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
qgv_peak_bw, data_rate);
/*
@@ -1002,7 +1031,7 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* satisfying the required data rate is found
*/
if (qgv_peak_bw == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
+ drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
@@ -1013,14 +1042,14 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
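
The loop above is a closest-fit search: among QGV points whose derated bandwidth still covers the required data rate, it keeps the tightest one and records that point's peak bandwidth for the PM Demand request. Reduced to a standalone example with made-up tables:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned int max_rate[] = { 12000, 18000, 30000 };
        unsigned int peak_bw[]  = { 16000, 24000, 40000 };
        unsigned int data_rate = 15000;
        unsigned int best_rate = UINT_MAX, qgv_peak_bw = 0;

        for (int i = 0; i < 3; i++) {
                if (max_rate[i] < data_rate)
                        continue;       /* point can't carry the load */
                if (max_rate[i] - data_rate < best_rate) {
                        best_rate = max_rate[i] - data_rate;
                        qgv_peak_bw = peak_bw[i];
                }
        }

        printf("peak bw = %u\n", qgv_peak_bw); /* 24000: 18000 is closest */
        return 0;
}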
-static int icl_find_qgv_points(struct drm_i915_private *i915,
+static int icl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 psf_points = 0;
u16 qgv_points = 0;
int i;
@@ -1031,22 +1060,22 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return ret;
for (i = 0; i < num_qgv_points; i++) {
- unsigned int max_data_rate = icl_qgv_bw(i915,
+ unsigned int max_data_rate = icl_qgv_bw(display,
num_active_planes, i);
if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
i, max_data_rate, data_rate);
}
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate >= data_rate)
psf_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d"
+ drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
" required %d\n",
i, max_data_rate, data_rate);
}
@@ -1057,14 +1086,14 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* reasons.
*/
if (qgv_points == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
if (num_psf_gv_points > 0 && psf_points == 0) {
- drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
@@ -1075,9 +1104,9 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* we can't enable SAGV due to the increased memory latency it may
* cause.
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
- qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes);
- drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n",
+ if (!intel_can_enable_sagv(display, new_bw_state)) {
+ qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
+ drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
qgv_points);
}
@@ -1085,7 +1114,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
/*
@@ -1101,80 +1130,90 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
+static int intel_bw_check_qgv_points(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state);
+ unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
unsigned int num_active_planes =
- intel_bw_num_active_planes(i915, new_bw_state);
+ intel_bw_num_active_planes(display, new_bw_state);
data_rate = DIV_ROUND_UP(data_rate, 1000);
- if (DISPLAY_VER(i915) >= 14)
- return mtl_find_qgv_points(i915, data_rate, num_active_planes,
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_find_qgv_points(display, data_rate, num_active_planes,
new_bw_state);
else
- return icl_find_qgv_points(i915, data_rate, num_active_planes,
+ return icl_find_qgv_points(display, data_rate, num_active_planes,
old_bw_state, new_bw_state);
}
-static bool intel_bw_state_changed(struct drm_i915_private *i915,
+static bool intel_dbuf_bw_changed(struct intel_display *display,
+ const struct intel_dbuf_bw *old_dbuf_bw,
+ const struct intel_dbuf_bw *new_dbuf_bw)
+{
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
+ old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_bw_state_changed(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
const struct intel_bw_state *new_bw_state)
{
enum pipe pipe;
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *old_crtc_bw =
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *old_dbuf_bw =
&old_bw_state->dbuf_bw[pipe];
- const struct intel_dbuf_bw *new_crtc_bw =
+ const struct intel_dbuf_bw *new_dbuf_bw =
&new_bw_state->dbuf_bw[pipe];
- enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
- if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] ||
- old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice])
- return true;
- }
+ if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
+ return true;
- if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe])
+ if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) !=
+ intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe]))
return true;
}
return false;
}
-static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state,
+static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
struct intel_crtc *crtc,
enum plane_id plane_id,
const struct skl_ddb_entry *ddb,
unsigned int data_rate)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
- unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb);
+ struct intel_display *display = to_intel_display(crtc);
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
enum dbuf_slice slice;
/*
* The arbiter can only really guarantee an
* equal share of the total bw to each plane.
*/
- for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) {
- crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate);
- crtc_bw->active_planes[slice] |= BIT(plane_id);
+ for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
+ dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
+ dbuf_bw->active_planes[slice] |= BIT(plane_id);
}
}
-static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
+static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
enum plane_id plane_id;
- memset(crtc_bw, 0, sizeof(*crtc_bw));
+ memset(dbuf_bw, 0, sizeof(*dbuf_bw));
if (!crtc_state->hw.active)
return;
@@ -1187,12 +1226,12 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
if (plane_id == PLANE_CURSOR)
continue;
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
&crtc_state->wm.skl.plane_ddb[plane_id],
crtc_state->data_rate[plane_id]);
- if (DISPLAY_VER(i915) < 11)
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ if (DISPLAY_VER(display) < 11)
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
&crtc_state->wm.skl.plane_ddb_y[plane_id],
crtc_state->data_rate[plane_id]);
}
@@ -1200,13 +1239,13 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
/* "Maximum Data Buffer Bandwidth" */
static int
-intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
+intel_bw_dbuf_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int total_max_bw = 0;
enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
+ for_each_dbuf_slice(display, slice) {
int num_active_planes = 0;
unsigned int max_bw = 0;
enum pipe pipe;
@@ -1215,11 +1254,11 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
* The arbiter can only really guarantee an
* equal share of the total bw to each plane.
*/
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe];
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
- max_bw = max(crtc_bw->max_bw[slice], max_bw);
- num_active_planes += hweight8(crtc_bw->active_planes[slice]);
+ max_bw = max(dbuf_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
}
max_bw *= num_active_planes;
@@ -1229,16 +1268,18 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
return DIV_ROUND_UP(total_max_bw, 64);
}
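
Worked through with hypothetical numbers, the loop above charges each slice for its worst plane times the number of planes sharing it (the arbiter only guarantees equal shares), sums across slices, and divides by 64 as in the return statement:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* two slices; per-slice worst plane bw and active plane
             * counts are made-up example values */
            unsigned int max_bw[2] = { 400000, 250000 };
            unsigned int planes[2] = { 3, 1 };
            unsigned int total = 0;

            for (int s = 0; s < 2; s++)
                    total += max_bw[s] * planes[s]; /* 1200000 + 250000 */

            /* divisor 64 taken from intel_bw_dbuf_min_cdclk() above */
            printf("min cdclk: %u\n", DIV_ROUND_UP(total, 64)); /* 22657 */
            return 0;
    }
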
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
+int intel_bw_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
enum pipe pipe;
int min_cdclk;
- min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
+ min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state);
- for_each_pipe(i915, pipe)
- min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]);
+ for_each_pipe(display, pipe)
+ min_cdclk = max(min_cdclk,
+ intel_bw_crtc_min_cdclk(display,
+ bw_state->data_rate[pipe]));
return min_cdclk;
}
@@ -1246,42 +1287,49 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915,
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_bw_state *new_bw_state = NULL;
const struct intel_bw_state *old_bw_state = NULL;
const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *crtc_state;
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
int old_min_cdclk, new_min_cdclk;
struct intel_crtc *crtc;
int i;
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
+
+ skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
+ skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
+
+ if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
+ continue;
+
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
- skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
-
- new_bw_state->min_cdclk[crtc->pipe] =
- intel_bw_crtc_min_cdclk(crtc_state);
+ new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
}
if (!old_bw_state)
return 0;
- if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) {
+ if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) {
int ret = intel_atomic_lock_global_state(&new_bw_state->base);
if (ret)
return ret;
}
- old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state);
- new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state);
+ old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state);
+ new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state);
/*
* No need to check against the cdclk state if
@@ -1309,7 +1357,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
return 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
new_min_cdclk, cdclk_state->bw_min_cdclk);
*need_cdclk_calc = true;
@@ -1319,7 +1367,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -1353,7 +1401,7 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
*changed = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] data rate %u num active planes %u\n",
crtc->base.base.id, crtc->base.name,
new_bw_state->data_rate[crtc->pipe],
@@ -1363,16 +1411,103 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
return 0;
}
-int intel_bw_atomic_check(struct intel_atomic_state *state)
+static int intel_bw_modeset_checks(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state;
+ struct intel_bw_state *new_bw_state;
+
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ int ret;
+
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_bw_state *old_bw_state = NULL;
+ struct intel_bw_state *new_bw_state = NULL;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_can_enable_sagv(old_crtc_state) ==
+ intel_crtc_can_enable_sagv(new_crtc_state))
+ continue;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ if (intel_can_enable_sagv(display, new_bw_state) !=
+ intel_can_enable_sagv(display, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
+{
+ struct intel_display *display = to_intel_display(state);
bool changed = false;
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_bw_state *new_bw_state;
const struct intel_bw_state *old_bw_state;
int ret;
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ if (any_ms) {
+ ret = intel_bw_modeset_checks(state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_bw_check_sagv_mask(state);
+ if (ret)
+ return ret;
+
/* FIXME earlier gens need some checks too */
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return 0;
ret = intel_bw_check_data_rate(state, &changed);
@@ -1383,9 +1518,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- (intel_can_enable_sagv(i915, old_bw_state) !=
- intel_can_enable_sagv(i915, new_bw_state) ||
- new_bw_state->force_check_qgv))
+ intel_can_enable_sagv(display, old_bw_state) !=
+ intel_can_enable_sagv(display, new_bw_state))
changed = true;
/*
@@ -1395,28 +1529,25 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (!changed)
return 0;
- ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state);
+ ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
if (ret)
return ret;
- new_bw_state->force_check_qgv = false;
-
return 0;
}
static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
- bw_state->force_check_qgv = true;
- drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
bw_state->data_rate[crtc->pipe],
bw_state->num_active_planes[crtc->pipe]);
@@ -1432,6 +1563,7 @@ void intel_bw_update_hw_state(struct intel_display *display)
return;
bw_state->active_pipes = 0;
+ bw_state->pipe_sagv_reject = 0;
for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
@@ -1443,6 +1575,11 @@ void intel_bw_update_hw_state(struct intel_display *display)
if (DISPLAY_VER(display) >= 11)
intel_bw_crtc_update(bw_state, crtc_state);
+
+ skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state);
+
+ /* SAGV is initially forced off */
+ bw_state->pipe_sagv_reject |= BIT(pipe);
}
}
@@ -1458,6 +1595,7 @@ void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
+ memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe]));
}
static struct intel_global_state *
@@ -1483,9 +1621,8 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_destroy_state = intel_bw_destroy_state,
};
-int intel_bw_init(struct drm_i915_private *i915)
+int intel_bw_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_bw_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -1499,8 +1636,8 @@ int intel_bw_init(struct drm_i915_private *i915)
* Limit this only if we have SAGV. For display version 14 onwards,
* SAGV is handled through pmdemand requests.
*/
- if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13))
- icl_force_disable_sagv(i915, state);
+ if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
+ icl_force_disable_sagv(display, state);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 3313e4eac4f0..eb2cc883e9c1 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -12,7 +12,6 @@
#include "intel_display_power.h"
#include "intel_global_state.h"
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -49,13 +48,6 @@ struct intel_bw_state {
*/
u16 qgv_points_mask;
- /*
- * Flag to force the QGV comparison in atomic check right after the
- * hw state readout
- */
- bool force_check_qgv;
-
- int min_cdclk[I915_MAX_PIPES];
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
};
@@ -72,14 +64,14 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state);
-void intel_bw_init_hw(struct drm_i915_private *dev_priv);
-int intel_bw_init(struct drm_i915_private *dev_priv);
-int intel_bw_atomic_check(struct intel_atomic_state *state);
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+void intel_bw_init_hw(struct intel_display *display);
+int intel_bw_init(struct intel_display *display);
+int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms);
+int icl_pcode_restrict_qgv_points(struct intel_display *display,
u32 points_mask);
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc);
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
+int intel_bw_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state);
void intel_bw_update_hw_state(struct intel_display *display);
void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
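
With the header now exporting intel_bw_atomic_check(state, any_ms), the modeset-only active_pipes bookkeeping can be driven from the single atomic-check call site rather than a separate hook. A hypothetical caller (the real one lives outside this diff), assuming any_ms carries the usual "this commit performs a full modeset" meaning:

    ret = intel_bw_atomic_check(state, any_ms);
    if (ret)
            return ret;
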
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 2a8749a0213e..6830950aae3f 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1972,9 +1972,7 @@ int intel_mdclk_cdclk_ratio(struct intel_display *display,
static void xe2lpd_mdclk_cdclk_ratio_program(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
intel_mdclk_cdclk_ratio(display, cdclk_config),
cdclk_config->joined_mbus);
}
@@ -2808,7 +2806,6 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
static int intel_compute_min_cdclk(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state =
intel_atomic_get_new_cdclk_state(state);
const struct intel_bw_state *bw_state;
@@ -2836,7 +2833,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
bw_state = intel_atomic_get_new_bw_state(state);
if (bw_state) {
- min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state);
+ min_cdclk = intel_bw_min_cdclk(display, bw_state);
if (cdclk_state->bw_min_cdclk != min_cdclk) {
int ret;
@@ -3342,6 +3339,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
void intel_cdclk_update_hw_state(struct intel_display *display)
{
+ const struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(display->cdclk.obj.state);
struct intel_crtc *crtc;
@@ -3359,6 +3358,8 @@ void intel_cdclk_update_hw_state(struct intel_display *display)
cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state);
cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
}
+
+ cdclk_state->bw_min_cdclk = intel_bw_min_cdclk(display, bw_state);
}
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index cfe14162231d..98dddf72c0eb 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -22,7 +22,9 @@
*
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
@@ -405,14 +407,13 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state)
static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
/* icl+ have dedicated output CSC */
if (DISPLAY_VER(display) >= 11)
return false;
/* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
- if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915))
+ if (DISPLAY_VER(display) < 7 || display->platform.ivybridge)
return false;
return crtc_state->limited_color_range;
@@ -516,7 +517,6 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
if (crtc_state->hw.ctm) {
@@ -538,7 +538,7 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
* LUT is needed but CSC is not we need to load an
* identity matrix.
*/
- drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915));
+ drm_WARN_ON(display->drm, !display->platform.geminilake);
ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity);
} else {
@@ -3983,12 +3983,10 @@ int intel_color_init(struct intel_display *display)
void intel_color_init_hooks(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (HAS_GMCH(display)) {
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
display->funcs.color = &chv_color_funcs;
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
display->funcs.color = &vlv_color_funcs;
else if (DISPLAY_VER(display) >= 4)
display->funcs.color = &i965_color_funcs;
@@ -4005,7 +4003,7 @@ void intel_color_init_hooks(struct intel_display *display)
display->funcs.color = &skl_color_funcs;
else if (DISPLAY_VER(display) == 8)
display->funcs.color = &bdw_color_funcs;
- else if (IS_HASWELL(i915))
+ else if (display->platform.haswell)
display->funcs.color = &hsw_color_funcs;
else if (DISPLAY_VER(display) == 7)
display->funcs.color = &ivb_color_funcs;
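
The intel_color.c hunks show the recurring shape of this series: IS_CHERRYVIEW(i915)-style macros taking drm_i915_private give way to bool flags hanging off struct intel_display. A minimal sketch of that layout (field names as used above; the actual struct contents are assumed):

    struct intel_display_platforms {
            bool cherryview;
            bool valleyview;
            bool haswell;
            bool geminilake;
            /* ... one flag per platform ... */
    };

    struct intel_display {
            struct drm_device *drm;
            struct intel_display_platforms platform;
            /* ... */
    };

    /* before: if (IS_CHERRYVIEW(i915)) ...
     * after:  if (display->platform.cherryview) ... */
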
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 17eea244cc83..f5cc38dbe559 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,6 +3,8 @@
* Copyright © 2018 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_combo_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index e42357bd9e80..6c81c9f2fd09 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -31,8 +31,10 @@
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_connector.h"
+#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
@@ -154,13 +156,14 @@ void intel_connector_destroy(struct drm_connector *connector)
int intel_connector_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_i915_private *i915 = to_i915(connector->dev);
int ret;
ret = intel_backlight_device_register(intel_connector);
if (ret)
goto err;
- if (i915_inject_probe_failure(to_i915(connector->dev))) {
+ if (i915_inject_probe_failure(i915)) {
ret = -EFAULT;
goto err_backlight;
}
@@ -204,10 +207,10 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
+ struct intel_display *display = to_intel_display(connector);
- drm_WARN_ON(dev,
- !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
if (!connector->base.state->crtc)
return INVALID_PIPE;
@@ -264,20 +267,19 @@ static const struct drm_prop_enum_list force_audio_names[] = {
void
intel_attach_force_audio_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.force_audio;
+ prop = display->properties.force_audio;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, 0,
- "audio",
- force_audio_names,
- ARRAY_SIZE(force_audio_names));
+ prop = drm_property_create_enum(display->drm, 0,
+ "audio",
+ force_audio_names,
+ ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
- dev_priv->display.properties.force_audio = prop;
+ display->properties.force_audio = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}
@@ -291,20 +293,19 @@ static const struct drm_prop_enum_list broadcast_rgb_names[] = {
void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.broadcast_rgb;
+ prop = display->properties.broadcast_rgb;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Broadcast RGB",
- broadcast_rgb_names,
- ARRAY_SIZE(broadcast_rgb_names));
+ prop = drm_property_create_enum(display->drm, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
- dev_priv->display.properties.broadcast_rgb = prop;
+ display->properties.broadcast_rgb = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
@@ -336,14 +337,14 @@ intel_attach_dp_colorspace_property(struct drm_connector *connector)
void
intel_attach_scaling_mode_property(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
u32 scaling_modes;
scaling_modes = BIT(DRM_MODE_SCALE_ASPECT) |
BIT(DRM_MODE_SCALE_FULLSCREEN);
/* On GMCH platforms borders are only possible on the LVDS port */
- if (!HAS_GMCH(i915) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (!HAS_GMCH(display) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
scaling_modes |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(connector, scaling_modes);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 76ffb3f8467c..cca22d2402e8 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -532,8 +532,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- bool reenable_hpd;
u32 adpa;
bool ret;
u32 save_adpa;
@@ -550,7 +548,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
*
* Just disable HPD interrupts here to prevent this
*/
- reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_block(&crt->base);
save_adpa = adpa = intel_de_read(display, crt->adpa_reg);
drm_dbg_kms(display->drm,
@@ -577,8 +575,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
drm_dbg_kms(display->drm,
"valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
- if (reenable_hpd)
- intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_clear_and_unblock(&crt->base);
return ret;
}
@@ -609,7 +606,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
for (i = 0; i < tries ; i++) {
/* turn on the FORCE_DETECT */
- i915_hotplug_interrupt_update(dev_priv,
+ i915_hotplug_interrupt_update(display,
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
@@ -627,7 +624,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, PORT_HOTPLUG_STAT(display),
CRT_HOTPLUG_INT_STATUS);
- i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
+ i915_hotplug_interrupt_update(display, CRT_HOTPLUG_FORCE_DETECT, 0);
return ret;
}
@@ -880,7 +877,7 @@ intel_crt_detect(struct drm_connector *connector,
wakeref = intel_display_power_get(display, encoder->power_domain);
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
/* We cannot rely on the HPD pin always being correctly wired
* up; for example, many KVMs do not pass it through, and so
* only trust an assertion that the monitor is connected.
@@ -904,7 +901,7 @@ intel_crt_detect(struct drm_connector *connector,
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
status = connector_status_disconnected;
goto out;
}
@@ -1084,7 +1081,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
- if (I915_HAS_HOTPLUG(display) &&
+ if (HAS_HOTPLUG(display) &&
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 599ddce96371..0c7f91046996 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -5,9 +5,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
#include "intel_vblank.h"
@@ -42,13 +43,13 @@ intel_dump_m_n_config(struct drm_printer *p,
}
static void
-intel_dump_infoframe(struct drm_i915_private *i915,
+intel_dump_infoframe(struct intel_display *display,
const union hdmi_infoframe *frame)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
+ hdmi_infoframe_log(KERN_DEBUG, display->drm->dev, frame);
}
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
@@ -136,7 +137,7 @@ static void intel_dump_plane_state(struct drm_printer *p,
}
static void
-ilk_dump_csc(struct drm_i915_private *i915,
+ilk_dump_csc(struct intel_display *display,
struct drm_printer *p,
const char *name,
const struct intel_csc_matrix *csc)
@@ -152,7 +153,7 @@ ilk_dump_csc(struct drm_i915_private *i915,
csc->coeff[3 * i + 1],
csc->coeff[3 * i + 2]);
- if (DISPLAY_VER(i915) < 7)
+ if (DISPLAY_VER(display) < 7)
return;
drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
@@ -178,7 +179,6 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
{
struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
struct drm_printer p;
@@ -188,7 +188,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n",
crtc->base.base.id, crtc->base.name,
@@ -262,19 +262,19 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
- intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.avi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
- intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
+ intel_dump_infoframe(display, &pipe_config->infoframes.spd);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
- intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.hdmi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc);
@@ -294,8 +294,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
pipe_config->hw.adjusted_mode.crtc_vdisplay,
pipe_config->framestart_delay, pipe_config->msa_timing_delay);
- drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
+ drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
+ str_yes_no(intel_vrr_is_fixed_rr(pipe_config)),
pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.flipline,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
pipe_config->vrr.vsync_start, pipe_config->vrr.vsync_end);
@@ -319,14 +320,14 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id,
pipe_config->hw.scaling_filter);
- if (HAS_GMCH(i915))
+ if (HAS_GMCH(display))
drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
@@ -343,7 +344,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_dpll_dump_hw_state(display, &p, &pipe_config->dpll_hw_state);
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->cgm_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
@@ -354,20 +355,20 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
- i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
+ display->color.glk_linear_degamma_lut ? "(linear) " : "",
pipe_config->pre_csc_lut ?
drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
pipe_config->post_csc_lut ?
drm_color_lut_size(pipe_config->post_csc_lut) : 0);
- if (DISPLAY_VER(i915) >= 11)
- ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc);
+ if (DISPLAY_VER(display) >= 11)
+ ilk_dump_csc(display, &p, "output csc", &pipe_config->output_csc);
- if (!HAS_GMCH(i915))
- ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc);
- else if (IS_CHERRYVIEW(i915))
+ if (!HAS_GMCH(display))
+ ilk_dump_csc(display, &p, "pipe csc", &pipe_config->csc);
+ else if (display->platform.cherryview)
vlv_dump_csc(&p, "cgm csc", &pipe_config->csc);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
intel_vdsc_state_dump(&p, 0, pipe_config);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index f38c998935b9..b48ed5df7a96 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -78,6 +78,7 @@
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -106,14 +107,14 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
return level;
}
-static bool has_buf_trans_select(struct drm_i915_private *i915)
+static bool has_buf_trans_select(struct intel_display *display)
{
- return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) < 10 && !display->platform.broxton;
}
-static bool has_iboost(struct drm_i915_private *i915)
+static bool has_iboost(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) == 9 && !display->platform.broxton;
}
/*
@@ -124,25 +125,25 @@ static bool has_iboost(struct drm_i915_private *i915)
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 iboost_bit = 0;
int i, n_entries;
enum port port = encoder->port;
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, i),
trans->entries[i].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, i),
trans->entries[i].hsw.trans2);
}
}
@@ -155,7 +156,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
u32 iboost_bit = 0;
int n_entries;
@@ -163,27 +164,25 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, 9),
trans->entries[level].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, 9),
trans->entries[level].hsw.trans2);
}
static i915_reg_t intel_ddi_buf_status_reg(struct intel_display *display, enum port port)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 14)
- return XELPDP_PORT_BUF_CTL1(i915, port);
+ return XELPDP_PORT_BUF_CTL1(display, port);
else
return DDI_BUF_CTL(port);
}
@@ -346,7 +345,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -359,14 +357,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
if (dig_port->ddi_a_4_lanes)
intel_dp->DP |= DDI_A_4_LANES;
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (intel_dp_is_uhbr(crtc_state))
intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT;
else
intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
}
- if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) {
+ if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@@ -379,8 +377,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
}
}
-static int icl_calc_tbt_pll_link(struct intel_display *display,
- enum port port)
+static int icl_calc_tbt_pll_link(struct intel_display *display, enum port port)
{
u32 val = intel_de_read(display, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
@@ -414,15 +411,14 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
- drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
temp = DP_MSA_MISC_SYNC_CLOCK;
@@ -445,7 +441,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
}
/* nonsense combination */
- drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ drm_WARN_ON(display->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
@@ -468,7 +464,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
temp |= DP_MSA_MISC_COLOR_VSC_SDP;
- intel_de_write(dev_priv, TRANS_MSA_MISC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_MSA_MISC(display, cpu_transcoder),
temp);
}
@@ -507,8 +503,8 @@ static u32
intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
@@ -516,7 +512,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
temp |= TGL_TRANS_DDI_SELECT_PORT(port);
else
temp |= TRANS_DDI_SELECT_PORT(port);
@@ -578,7 +574,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_HDMI_SCRAMBLING;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
temp |= TRANS_DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
@@ -591,11 +587,11 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
master == INVALID_TRANSCODER);
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
@@ -604,7 +600,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
- if (IS_DISPLAY_VER(dev_priv, 8, 10) &&
+ if (IS_DISPLAY_VER(display, 8, 10) &&
crtc_state->master_transcoder != INVALID_TRANSCODER) {
u8 master_select =
bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
@@ -619,11 +615,10 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
enum transcoder master_transcoder = crtc_state->master_transcoder;
u32 ctl2 = 0;
@@ -635,12 +630,12 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
PORT_SYNC_MODE_MASTER_SELECT(master_select);
}
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
ctl2);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
intel_ddi_transcoder_func_reg_val_get(encoder,
crtc_state));
}
@@ -654,8 +649,7 @@ void
intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
@@ -663,7 +657,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
}
@@ -677,27 +671,26 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
- if (DISPLAY_VER(dev_priv) >= 11)
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 11)
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
0);
- ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- if (IS_DISPLAY_VER(dev_priv, 8, 10))
+ if (IS_DISPLAY_VER(display, 8, 10))
ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
@@ -706,7 +699,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
if (intel_dp_mst_is_slave_trans(crtc_state))
@@ -725,17 +718,15 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool enable, u32 hdcp_mask)
{
struct intel_display *display = to_intel_display(intel_encoder);
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
int ret = 0;
wakeref = intel_display_power_get_if_enabled(display,
intel_encoder->power_domain);
- if (drm_WARN_ON(dev, !wakeref))
+ if (drm_WARN_ON(display->drm, !wakeref))
return -ENXIO;
- intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
hdcp_mask, enable ? hdcp_mask : 0);
intel_display_power_put(display, intel_encoder->power_domain, wakeref);
return ret;
@@ -744,7 +735,6 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct intel_display *display = to_intel_display(intel_connector);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
@@ -765,12 +755,12 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
- ddi_mode = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ ddi_mode = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) &
TRANS_DDI_MODE_SELECT_MASK;
if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI ||
@@ -804,7 +794,6 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u8 *pipe_mask, bool *is_dp_mst)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = encoder->port;
intel_wakeref_t wakeref;
enum pipe p;
@@ -819,13 +808,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!wakeref)
return;
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ tmp = intel_de_read(display, DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP));
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A) {
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
@@ -846,7 +835,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
goto out;
}
- for_each_pipe(dev_priv, p) {
+ for_each_pipe(display, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
@@ -856,7 +845,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!trans_wakeref)
continue;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
port_mask = TGL_TRANS_DDI_PORT_MASK;
ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
} else {
@@ -864,8 +853,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
ddi_select = TRANS_DDI_SELECT_PORT(port);
}
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
intel_display_power_put(display, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
@@ -883,12 +872,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No pipe for [ENCODER:%d:%s] found\n",
encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && dp128b132b_pipe_mask) {
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/*
* If we don't have 8b/10b MST, but have more than one
@@ -901,12 +890,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
* can assume it's SST.
*/
if (hweight8(dp128b132b_pipe_mask) > 1 ||
- intel_dp_mst_encoder_active_links(dig_port))
+ intel_dp_mst_active_streams(intel_dp))
mst_pipe_mask = dp128b132b_pipe_mask;
}
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask);
@@ -914,7 +903,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe masks: all %02x, MST %02x, 128b/132b %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask, mst_pipe_mask, dp128b132b_pipe_mask);
@@ -922,12 +911,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
*is_dp_mst = mst_pipe_mask;
out:
- if (*pipe_mask && (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))) {
- tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
+ if (*pipe_mask && (display->platform.geminilake || display->platform.broxton)) {
+ tmp = intel_de_read(display, BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
encoder->base.base.id, encoder->base.name, tmp);
}
@@ -1041,8 +1030,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
@@ -1050,53 +1038,53 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
val = TGL_TRANS_CLK_SEL_PORT(phy);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
else
val = TRANS_CLK_SEL_PORT(encoder->port);
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val;
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_DISABLED;
else
val = TRANS_CLK_SEL_DISABLED;
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
-static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+static void _skl_ddi_set_iboost(struct intel_display *display,
enum port port, u8 iboost)
{
u32 tmp;
- tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
+ tmp = intel_de_read(display, DISPIO_CR_TX_BMU_CR0);
tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
if (iboost)
tmp |= iboost << BALANCE_LEG_SHIFT(port);
else
tmp |= BALANCE_LEG_DISABLE(port);
- intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
+ intel_de_write(display, DISPIO_CR_TX_BMU_CR0, tmp);
}
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int level)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1109,7 +1097,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
iboost = trans->entries[level].hsw.i_boost;
@@ -1117,28 +1105,28 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
/* Make sure that the requested I_boost is valid */
if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
- drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
+ drm_err(display->drm, "Invalid I_boost value %u\n", iboost);
return;
}
- _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
+ _skl_ddi_set_iboost(display, encoder->port, iboost);
if (encoder->port == PORT_A && dig_port->max_lanes == 4)
- _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
+ _skl_ddi_set_iboost(display, PORT_E, iboost);
}
static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries;
encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
+ if (drm_WARN_ON(display->drm, n_entries < 1))
n_entries = 1;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
@@ -1171,14 +1159,14 @@ static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_stat
static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
u32 val;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
@@ -1186,25 +1174,25 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
intel_dp->hobl_active = is_hobl_buf_trans(trans);
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
+ intel_de_rmw(display, ICL_PORT_CL_DW10(phy), val,
intel_dp->hobl_active ? val : 0);
}
/* Set PORT_TX_DW5 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
COEFF_POLARITY | CURSOR_PROGRAM |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW2_LN(ln, phy),
SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK,
SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) |
SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) |
@@ -1216,7 +1204,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK,
POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) |
POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) |
@@ -1227,7 +1215,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW7_LN(ln, phy),
N_SCALAR_MASK,
N_SCALAR(trans->entries[level].icl.dw7_n_scalar));
}
@@ -1236,7 +1224,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
int ln;
@@ -1246,12 +1234,12 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -1261,33 +1249,33 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln < 4; ln++) {
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
LOADGEN_SELECT,
icl_combo_phy_loadgen_select(crtc_state, ln));
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ intel_de_rmw(display, ICL_PORT_CL_DW5(phy),
0, SUS_CLOCK_CONFIG);
/* 4. Clear training enable to change swing values */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
icl_ddi_combo_vswing_program(encoder, crtc_state);
/* 6. Set training enable to trigger update */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
}
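A rough sketch of the intel_de_rmw() read-modify-write semantics the sequence above relies on; illustrative only, the real helper also returns the old register value, and sketch_de_rmw is a hypothetical name, not driver code:

static u32 sketch_de_rmw(struct intel_display *display, i915_reg_t reg,
			 u32 clear, u32 set)
{
	/* read the register, drop the cleared bits, OR in the new ones */
	u32 old = intel_de_read(display, reg);

	intel_de_write(display, reg, (old & ~clear) | set);

	return old;
}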
static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1296,13 +1284,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
return;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
- intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
}
@@ -1312,13 +1300,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
}
@@ -1329,7 +1317,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1338,7 +1326,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1354,21 +1342,21 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
* values from the table for which TX1 and TX2 are enabled.
*/
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port),
+ intel_de_rmw(display, MG_CLKHUB(ln, tc_port),
CFG_LOW_RATE_LKREN_EN,
crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
- intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
@@ -1378,9 +1366,9 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
- intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
}
}
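/*
 * For reference, the lane mapping assumed by the 2*ln+0 / 2*ln+1
 * indexing above, as implied by the MG_TX1 vs MG_TX2 register usage
 * (a sketch, not authoritative):
 *
 *	logical lane 0 -> PHY lane 0, TX1
 *	logical lane 1 -> PHY lane 0, TX2
 *	logical lane 2 -> PHY lane 1, TX1
 *	logical lane 3 -> PHY lane 1, TX2
 */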
@@ -1490,12 +1478,12 @@ int intel_ddi_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int lane)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
int level, n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&i915->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return 0;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1504,7 +1492,7 @@ int intel_ddi_level(struct intel_encoder *encoder,
level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state,
lane);
- if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
+ if (drm_WARN_ON_ONCE(display->drm, level >= n_entries))
level = n_entries - 1;
return level;
@@ -1514,13 +1502,13 @@ static void
hsw_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
enum port port = encoder->port;
u32 signal_levels;
- if (has_iboost(dev_priv))
+ if (has_iboost(display))
skl_ddi_set_iboost(encoder, crtc_state, level);
/* HDMI ignores the rest */
@@ -1529,46 +1517,46 @@ hsw_set_signal_levels(struct intel_encoder *encoder,
signal_levels = DDI_BUF_TRANS_SELECT(level);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~DDI_BUF_EMP_MASK;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+ intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
}
-static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_enable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
+ intel_de_rmw(display, reg, clk_sel_mask, clk_sel);
/*
* "This step and the step before must be
* done with separate register writes."
*/
- intel_de_rmw(i915, reg, clk_off, 0);
+ intel_de_rmw(display, reg, clk_off, 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_disable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, 0, clk_off);
+ intel_de_rmw(display, reg, 0, clk_off);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+static bool _icl_ddi_is_clock_enabled(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- return !(intel_de_read(i915, reg) & clk_off);
+ return !(intel_de_read(display, reg) & clk_off);
}
static struct intel_shared_dpll *
@@ -1585,14 +1573,14 @@ _icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
static void adls_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_enable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1600,19 +1588,19 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
static void adls_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_disable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+ return _icl_ddi_is_clock_enabled(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1629,14 +1617,14 @@ static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1644,19 +1632,19 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1673,23 +1661,23 @@ static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
* If we fail this, something went very wrong: the first 2 PLLs should be
* used by the first 2 phys and the last 2 PLLs by the last 2 phys.
*/
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
(pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) ||
(pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
return;
- _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_enable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1697,19 +1685,19 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_disable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+ return _icl_ddi_is_clock_enabled(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1739,14 +1727,14 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1754,19 +1742,19 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1783,39 +1771,39 @@ struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
* "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port.
* MG does not exist, but the programming is required to ungate DDIC and DDID."
*/
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
icl_ddi_combo_enable_clock(encoder, crtc_state);
}
static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
icl_ddi_combo_disable_clock(encoder);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
@@ -1826,54 +1814,54 @@ static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, DDI_CLK_SEL(port),
+ intel_de_write(display, DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
- tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0);
+ tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
}
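/*
 * Note: per the sequences above, a TC port clock counts as enabled
 * only when both registers agree: DDI_CLK_SEL selects a clock source
 * and the per-port TC_CLK_OFF gate in ICL_DPCLKA_CFGCR0 is clear.
 */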
@@ -1934,47 +1922,47 @@ static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
static void skl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port),
DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void skl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
0, DPLL_CTRL2_DDI_CLK_OFF(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
/*
* FIXME Not sure if the override affects both
* the PLL selection and the CLK_OFF bit.
*/
- return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
+ return !(intel_de_read(display, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
}
static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
@@ -2002,30 +1990,30 @@ static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+ intel_de_write(display, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
void hsw_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_de_write(display, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
+ return intel_de_read(display, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
}
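A minimal sketch of how the per-platform hooks above are dispatched, assuming the encoder->enable_clock vfunc wiring used by intel_ddi_enable_clock()/intel_ddi_disable_clock() below (the sketch_ name is hypothetical):

static void sketch_ddi_enable_clock(struct intel_encoder *encoder,
				    const struct intel_crtc_state *crtc_state)
{
	/* each platform installs its own hook, e.g. hsw_ddi_enable_clock() */
	if (encoder->enable_clock)
		encoder->enable_clock(encoder, crtc_state);
}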
static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
@@ -2081,7 +2069,7 @@ void intel_ddi_disable_clock(struct intel_encoder *encoder)
void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 port_mask;
bool ddi_clk_needed;
@@ -2101,7 +2089,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (drm_WARN_ON(&i915->drm, is_mst))
+ if (drm_WARN_ON(display->drm, is_mst))
return;
}
@@ -2116,11 +2104,11 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* Sanity check that we haven't incorrectly registered another
* encoder using any of the ports of this DSI encoder.
*/
- for_each_intel_encoder(&i915->drm, other_encoder) {
+ for_each_intel_encoder(display->drm, other_encoder) {
if (other_encoder == encoder)
continue;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
port_mask & BIT(other_encoder->port)))
return;
}
@@ -2135,7 +2123,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
!encoder->is_clock_enabled(encoder))
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n",
encoder->base.base.id, encoder->base.name);
@@ -2255,10 +2243,10 @@ tgl_dp_tp_transcoder(const struct intel_crtc_state *crtc_state)
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_CTL(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_CTL(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_CTL(encoder->port);
@@ -2267,10 +2255,10 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
static i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_STATUS(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_STATUS(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_STATUS(encoder->port);
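/*
 * Note: on display version 12+ the DP transport control/status
 * registers are indexed per transcoder (TGL_DP_TP_CTL/STATUS), while
 * older platforms index them per port (DP_TP_CTL/STATUS); that is why
 * both helpers above take the crtc_state, to resolve the transcoder.
 */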
@@ -2445,14 +2433,14 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
static void intel_ddi_disable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (!crtc_state->fec_enable)
return;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_FEC_ENABLE, 0);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
}
static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
@@ -2474,11 +2462,11 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
* Splitter enable for eDP MSO is limited to certain pipes, on certain
* platforms.
*/
-static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+static u8 intel_ddi_splitter_pipe_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) > 20)
+ if (DISPLAY_VER(display) > 20)
return ~0;
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
return BIT(PIPE_A) | BIT(PIPE_B);
else
return BIT(PIPE_A);
@@ -2487,28 +2475,28 @@ static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
- dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+ dss1 = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE;
if (!pipe_config->splitter.enable)
return;
- if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
+ if (drm_WARN_ON(display->drm, !(intel_ddi_splitter_pipe_mask(display) & BIT(pipe)))) {
pipe_config->splitter.enable = false;
return;
}
switch (dss1 & SPLITTER_CONFIGURATION_MASK) {
default:
- drm_WARN(&i915->drm, true,
+ drm_WARN(display->drm, true,
"Invalid splitter configuration, dss1=0x%08x\n", dss1);
fallthrough;
case SPLITTER_CONFIGURATION_2_SEGMENT:
@@ -2524,12 +2512,12 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1 = 0;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
if (crtc_state->splitter.enable) {
@@ -2541,7 +2529,7 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT;
}
- intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe),
+ intel_de_rmw(display, ICL_PIPE_DSS_CTL1(pipe),
SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK |
OVERLAP_PIXELS_MASK, dss1);
}
@@ -2549,27 +2537,27 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
static void
mtl_ddi_enable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 set_bits, wait_bits;
- if (DISPLAY_VER(dev_priv) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, 0, set_bits);
- if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) {
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, 0, set_bits);
+ if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100)) {
+ drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
}
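A rough sketch of the wait_for_us() polling contract used above (zero on success, non-zero on timeout); the real macro is ktime based, and sketch_wait_d2d_up is a hypothetical name:

static int sketch_wait_d2d_up(struct intel_display *display,
			      i915_reg_t reg, u32 wait_bits)
{
	int us;

	for (us = 0; us < 100; us++) {
		if (intel_de_read(display, reg) & wait_bits)
			return 0; /* D2D link reported up */
		udelay(1);
	}

	return -ETIMEDOUT;
}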
@@ -2599,13 +2587,13 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val;
val = intel_tc_port_in_tbt_alt_mode(dig_port) ?
XELPDP_PORT_BUF_IO_SELECT_TBT : 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, val);
}
@@ -2734,7 +2722,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int ret;
@@ -2778,7 +2765,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
@@ -2882,16 +2869,15 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- if (DISPLAY_VER(dev_priv) < 11)
- drm_WARN_ON(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 11)
+ drm_WARN_ON(display->drm,
is_mst && (port == PORT_A || port == PORT_E));
else
- drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
+ drm_WARN_ON(display->drm, is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2908,14 +2894,14 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
encoder->set_signal_levels(encoder, crtc_state);
@@ -2931,7 +2917,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_start_link_train(state, intel_dp, crtc_state);
- if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
+ if ((port != PORT_A || DISPLAY_VER(display) >= 9) &&
!is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp, crtc_state);
@@ -2979,12 +2965,11 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_enable_clock(encoder, crtc_state);
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
@@ -3022,10 +3007,9 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
@@ -3050,27 +3034,27 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
static void
mtl_ddi_disable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
- if (DISPLAY_VER(dev_priv) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, clr_bits, 0);
- if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100))
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, clr_bits, 0);
+ if (wait_for_us(!(intel_de_read(display, reg) & wait_bits), 100))
+ drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3089,10 +3073,9 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
if (DISPLAY_VER(display) >= 14)
intel_wait_ddi_buf_idle(display, port);
@@ -3100,7 +3083,7 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
mtl_ddi_disable_d2d(encoder);
if (intel_crtc_has_dp_encoder(crtc_state)) {
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_ENABLE, 0);
}
@@ -3118,7 +3101,6 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
intel_wakeref_t wakeref;
@@ -3135,12 +3117,12 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
*/
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (is_mst || intel_dp_is_uhbr(old_crtc_state)) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK,
0);
}
@@ -3160,7 +3142,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* Configure Transcoder Clock select to direct no clock to the
* transcoder"
*/
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
@@ -3176,8 +3158,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_clock(encoder);
/* De-select Thunderbolt */
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port),
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, 0);
}
@@ -3187,7 +3169,6 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
intel_wakeref_t wakeref;
@@ -3195,12 +3176,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_ddi_buf_disable(encoder, old_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
@@ -3220,7 +3201,6 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *pipe_crtc;
bool is_hdmi = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI);
@@ -3249,6 +3229,8 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
}
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3257,7 +3239,7 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
intel_dsc_disable(old_pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_disable(old_pipe_crtc_state);
else
ilk_pfit_disable(old_pipe_crtc_state);
@@ -3359,12 +3341,12 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 9)
+ if (port == PORT_A && DISPLAY_VER(display) < 9)
intel_dp_stop_link_train(intel_dp, crtc_state);
drm_connector_update_privacy_screen(conn_state);
@@ -3401,7 +3383,6 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
@@ -3410,11 +3391,11 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
/* e. Enable D2D Link for C10/C20 Phy */
@@ -3423,7 +3404,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
encoder->set_signal_levels(encoder, crtc_state);
/* Display WA #1143: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
/*
* For some reason these chicken bits have been
* stuffed into a transcoder register, even though
@@ -3433,7 +3414,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
i915_reg_t reg = gen9_chicken_trans_reg_by_port(display, port);
u32 val;
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(display, reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3442,8 +3423,8 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
udelay(1);
@@ -3454,7 +3435,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
}
intel_ddi_power_up_lanes(encoder, crtc_state);
@@ -3475,7 +3456,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (dig_port->ddi_a_4_lanes)
buf_ctl |= DDI_A_4_LANES;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
u32 port_buf = 0;
port_buf |= XELPDP_PORT_WIDTH(crtc_state->lane_count);
@@ -3483,15 +3464,15 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (dig_port->lane_reversal)
port_buf |= XELPDP_PORT_REVERSAL;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) {
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
+ } else if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
+ drm_WARN_ON(display->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -3522,6 +3503,8 @@ static void intel_ddi_enable(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
+
/* Enable/Disable DP2.0 SDP split config before transcoder */
intel_audio_sdp_split_update(crtc_state);
@@ -3567,7 +3550,7 @@ static void intel_ddi_disable_dp(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
@@ -3584,12 +3567,12 @@ static void intel_ddi_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = old_conn_state->connector;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
}
@@ -3653,16 +3636,16 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
/* FIXME: Add MTL pll_mgr */
- if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder))
+ if (DISPLAY_VER(display) >= 14 || !intel_encoder_is_tc(encoder))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(crtc_state))
intel_update_active_dpll(state, pipe_crtc, encoder);
}
@@ -3678,7 +3661,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_tc_port = intel_encoder_is_tc(encoder);
@@ -3697,7 +3680,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
* Type-C ports. Skip this step for TBT.
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dpio_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
}
@@ -3765,10 +3748,9 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 dp_tp_ctl;
- dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
drm_WARN_ON(display->drm, dp_tp_ctl & DP_TP_CTL_ENABLE);
@@ -3781,10 +3763,10 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
if (crtc_state->enhanced_framing)
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
- if (IS_ALDERLAKE_P(dev_priv) &&
+ if (display->platform.alderlake_p &&
(intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port)))
adlp_tbt_to_dp_alt_switch_wa(encoder);
@@ -3796,11 +3778,11 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 temp;
- temp = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ temp = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
@@ -3821,17 +3803,17 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
break;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), temp);
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), temp);
}
static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE);
/*
@@ -3841,28 +3823,26 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
* In this case there is a requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 12)
+ if (port == PORT_A && DISPLAY_VER(display) < 12)
return;
- if (intel_de_wait_for_set(dev_priv,
+ if (intel_de_wait_for_set(display,
dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_IDLE_DONE, 2))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for DP idle patterns\n");
}
-static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+static bool intel_ddi_is_audio_enabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- struct intel_display *display = &dev_priv->display;
-
if (cpu_transcoder == TRANSCODER_EDP)
return false;
if (!intel_display_power_is_enabled(display, POWER_DOMAIN_AUDIO_MMIO))
return false;
- return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
+ return intel_de_read(display, HSW_AUD_PIN_ELD_CP_VLD) &
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
@@ -3892,34 +3872,34 @@ static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
}
-static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+static enum transcoder bdw_transcoder_master_readout(struct intel_display *display,
enum transcoder cpu_transcoder)
{
u32 master_select;
- if (DISPLAY_VER(dev_priv) >= 11) {
- u32 ctl2 = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder));
+ if (DISPLAY_VER(display) >= 11) {
+ u32 ctl2 = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder));
if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
return INVALID_TRANSCODER;
master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
} else {
- u32 ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ u32 ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
return INVALID_TRANSCODER;
@@ -3936,15 +3916,14 @@ static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *de
static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum transcoder cpu_transcoder;
crtc_state->master_transcoder =
- bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+ bdw_transcoder_master_readout(display, crtc_state->cpu_transcoder);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ for_each_cpu_transcoder_masked(display, cpu_transcoder, transcoders) {
enum intel_display_power_domain power_domain;
intel_wakeref_t trans_wakeref;
@@ -3955,14 +3934,14 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
if (!trans_wakeref)
continue;
- if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ if (bdw_transcoder_master_readout(display, cpu_transcoder) ==
crtc_state->cpu_transcoder)
crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
intel_display_power_put(display, power_domain, trans_wakeref);
}
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
crtc_state->master_transcoder != INVALID_TRANSCODER &&
crtc_state->sync_mode_slaves_mask);
}
@@ -4085,11 +4064,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 ddi_func_ctl, ddi_mode, flags = 0;
- ddi_func_ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ddi_func_ctl = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if (ddi_func_ctl & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -4131,13 +4109,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/*
* If this is true, we know we're being called from an MST stream
* encoder's ->get_config().
*/
- if (intel_dp_mst_encoder_active_links(dig_port))
+ if (intel_dp_mst_active_streams(intel_dp))
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
@@ -4152,11 +4130,11 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
static void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
/* XXX: DSI transcoder paranoia */
- if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
+ if (drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder)))
return;
intel_ddi_read_func_ctl(encoder, pipe_config);
@@ -4164,14 +4142,14 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_mso_get_config(encoder, pipe_config);
pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
+ intel_ddi_is_audio_enabled(display, cpu_transcoder);
if (encoder->type == INTEL_OUTPUT_EDP)
intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
ddi_dotclock_get(pipe_config);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_get_lane_lat_optim_mask(encoder);
@@ -4192,7 +4170,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
HDMI_INFOFRAME_TYPE_DRM,
&pipe_config->infoframes.drm);
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
bdw_get_trans_port_sync_config(pipe_config);
intel_psr_get_config(encoder, pipe_config);
@@ -4285,10 +4263,10 @@ static enum icl_port_dpll_id
icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return ICL_PORT_DPLL_DEFAULT;
if (icl_ddi_tc_pll_is_tbt(pll))
@@ -4382,11 +4360,11 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
bool fastset = true;
if (intel_encoder_is_tc(encoder)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
fastset = false;
@@ -4421,12 +4399,12 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4441,13 +4419,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ if (display->platform.haswell && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->pch_pfit.force_thru =
pipe_config->pch_pfit.enabled ||
pipe_config->crc_enabled;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -4498,9 +4476,9 @@ static u8
intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
int tile_group_id)
{
+ struct intel_display *display = to_intel_display(ref_crtc_state);
struct drm_connector *connector;
const struct drm_connector_state *conn_state;
- struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
struct intel_atomic_state *state =
to_intel_atomic_state(ref_crtc_state->uapi.state);
u8 transcoders = 0;
@@ -4510,7 +4488,7 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4542,11 +4520,11 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
@@ -4618,7 +4596,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_connector *connector;
enum port port = dig_port->base.port;
@@ -4627,7 +4605,7 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
return -ENOMEM;
dig_port->dp.output_reg = DDI_BUF_CTL(port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
dig_port->dp.prepare_link_retrain = mtl_ddi_prepare_link_retrain;
else
dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
@@ -4643,15 +4621,14 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
}
if (dig_port->base.type == INTEL_OUTPUT_EDP) {
- struct drm_device *dev = dig_port->base.base.dev;
struct drm_privacy_screen *privacy_screen;
- privacy_screen = drm_privacy_screen_get(dev->dev, NULL);
+ privacy_screen = drm_privacy_screen_get(display->drm->dev, NULL);
if (!IS_ERR(privacy_screen)) {
drm_connector_attach_privacy_screen_provider(&connector->base,
privacy_screen);
} else if (PTR_ERR(privacy_screen) != -ENODEV) {
- drm_warn(dev, "Error getting privacy-screen\n");
+ drm_warn(display->drm, "Error getting privacy-screen\n");
}
}
@@ -4662,7 +4639,6 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
struct i2c_adapter *ddc = connector->base.ddc;
@@ -4675,7 +4651,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
if (connector->base.status != connector_status_connected)
return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
ctx);
if (ret)
return ret;
@@ -4692,7 +4668,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
if (!crtc_state->hw.active)
@@ -4708,7 +4684,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
ret = drm_scdc_readb(ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- drm_err(&dev_priv->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
connector->base.base.id, connector->base.name, ret);
return 0;
}
@@ -4733,11 +4709,11 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
static void intel_ddi_link_check(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/* TODO: Move checking the HDMI link state here as well. */
- drm_WARN_ON(&i915->drm, !dig_port->dp.attached_connector);
+ drm_WARN_ON(display->drm, !dig_port->dp.attached_connector);
intel_dp_link_check(encoder);
}
@@ -4800,26 +4776,26 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
static bool lpt_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, SDEISR) & bit;
+ return intel_de_read(display, SDEISR) & bit;
}
static bool hsw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, DEISR) & bit;
+ return intel_de_read(display, DEISR) & bit;
}
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+ return intel_de_read(display, GEN8_DE_PORT_ISR) & bit;
}
static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
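
After the conversion, the three *_digital_port_connected() helpers above are structurally identical and differ only in which interrupt status register they sample (SDEISR, DEISR or GEN8_DE_PORT_ISR). A hypothetical consolidation, not part of this patch, could look like:

	static bool isr_port_connected(struct intel_encoder *encoder,
				       i915_reg_t isr_reg)
	{
		struct intel_display *display = to_intel_display(encoder);
		u32 bit = display->hotplug.hpd[encoder->hpd_pin];

		/* connected if the port's hotplug bit is live in the ISR */
		return intel_de_read(display, isr_reg) & bit;
	}

(The lpt variant reads the pch_hpd[] table rather than hpd[], which is presumably why the helpers stay separate.)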
@@ -4848,7 +4824,7 @@ static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
if (dig_port->base.port != PORT_A)
return false;
@@ -4859,7 +4835,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
* supported configuration
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
return true;
return false;
@@ -4868,15 +4844,15 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
static int
intel_ddi_max_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
enum port port = dig_port->base.port;
int max_lanes = 4;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return max_lanes;
if (port == PORT_A || port == PORT_E) {
- if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
max_lanes = port == PORT_A ? 4 : 0;
else
/* Both A and E share 2 lanes */
@@ -4889,7 +4865,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
* so we use the proper lane count for our calculations.
*/
if (intel_ddi_a_force_4_lanes(dig_port)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Forcing DDI_A_4_LANES for port A\n");
dig_port->ddi_a_4_lanes = true;
max_lanes = 4;
@@ -4898,8 +4874,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
return max_lanes;
}
-static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin xelpd_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_D_XELPD)
return HPD_PORT_D + port - PORT_D_XELPD;
@@ -4909,8 +4884,7 @@ static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin dg1_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4918,8 +4892,7 @@ static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin tgl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_TC1 + port - PORT_TC1;
@@ -4927,11 +4900,12 @@ static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin rkl_hpd_pin(struct intel_display *display, enum port port)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (HAS_PCH_TGP(dev_priv))
- return tgl_hpd_pin(dev_priv, port);
+ return tgl_hpd_pin(display, port);
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4939,8 +4913,7 @@ static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin icl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_C)
return HPD_PORT_TC1 + port - PORT_C;
@@ -4948,31 +4921,34 @@ static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin ehl_hpd_pin(struct intel_display *display, enum port port)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (port == PORT_D)
return HPD_PORT_A;
if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
+static enum hpd_pin skl_hpd_pin(struct intel_display *display, enum port port)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
+static bool intel_ddi_is_tc(struct intel_display *display, enum port port)
{
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return port >= PORT_TC1;
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
return port >= PORT_C;
else
return false;
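
Note the pattern in the hpd-pin helpers above: everything now takes struct intel_display *, but the rkl/ehl/skl variants still derive a drm_i915_private locally because HAS_PCH_TGP() is a PCH-level check that, for now, lives on the i915 device. A minimal sketch of the idiom:

	static enum hpd_pin example_hpd_pin(struct intel_display *display,
					    enum port port)
	{
		/* escape hatch: PCH type is still tracked in drm_i915_private */
		struct drm_i915_private *i915 = to_i915(display->drm);

		if (HAS_PCH_TGP(i915))
			return tgl_hpd_pin(display, port);

		return HPD_PORT_A + port - PORT_A;
	}

to_intel_display() itself is polymorphic: in this patch alone it is fed encoders, digital ports, crtcs, crtc states and atomic states, which is what lets most helpers drop the explicit dev_priv plumbing.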
@@ -5015,21 +4991,21 @@ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
-static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
+static bool port_strap_detected(struct intel_display *display, enum port port)
{
/* straps not used on skl+ */
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return true;
switch (port) {
case PORT_A:
- return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ return intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
case PORT_B:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
case PORT_C:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
case PORT_D:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
case PORT_E:
return true; /* no strap for DDI-E */
default:
@@ -5043,18 +5019,18 @@ static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
return init_dp || intel_encoder_is_tc(encoder);
}
-static bool assert_has_icl_dsi(struct drm_i915_private *i915)
+static bool assert_has_icl_dsi(struct intel_display *display)
{
- return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) &&
- !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11,
+ return !drm_WARN(display->drm, !display->platform.alderlake_p &&
+ !display->platform.tigerlake && DISPLAY_VER(display) != 11,
"Platform does not support DSI\n");
}
-static bool port_in_use(struct drm_i915_private *i915, enum port port)
+static bool port_in_use(struct intel_display *display, enum port port)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
/* FIXME what about second port for dual link DSI? */
if (encoder->port == port)
return true;
@@ -5066,7 +5042,6 @@ static bool port_in_use(struct drm_i915_private *i915, enum port port)
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp;
@@ -5078,8 +5053,8 @@ void intel_ddi_init(struct intel_display *display,
if (port == PORT_NONE)
return;
- if (!port_strap_detected(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!port_strap_detected(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c strap not detected\n", port_name(port));
return;
}
@@ -5087,15 +5062,15 @@ void intel_ddi_init(struct intel_display *display,
if (!assert_port_valid(display, port))
return;
- if (port_in_use(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (port_in_use(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c already claimed\n", port_name(port));
return;
}
if (intel_bios_encoder_supports_dsi(devdata)) {
/* BXT/GLK handled elsewhere, for now at least */
- if (!assert_has_icl_dsi(dev_priv))
+ if (!assert_has_icl_dsi(display))
return;
icl_dsi_init(display, devdata);
@@ -5111,7 +5086,7 @@ void intel_ddi_init(struct intel_display *display,
* outputs.
*/
if (intel_hti_uses_phy(display, phy)) {
- drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
+ drm_dbg_kms(display->drm, "PORT %c / PHY %c reserved by HTI\n",
port_name(port), phy_name(phy));
return;
}
@@ -5128,20 +5103,20 @@ void intel_ddi_init(struct intel_display *display,
*/
init_dp = true;
init_hdmi = false;
- drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+ drm_dbg_kms(display->drm, "VBT says port %c has lspcon\n",
port_name(port));
}
if (!init_dp && !init_hdmi) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
return;
}
if (intel_phy_is_snps(display, phy) &&
- dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
- drm_dbg_kms(&dev_priv->drm,
+ display->snps.phy_failed_calibration & BIT(phy)) {
+ drm_dbg_kms(display->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
phy_name(phy));
}
@@ -5155,26 +5130,26 @@ void intel_ddi_init(struct intel_display *display,
encoder = &dig_port->base;
encoder->devdata = devdata;
- if (DISPLAY_VER(dev_priv) >= 13 && port >= PORT_D_XELPD) {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c/PHY %c",
port_name(port - PORT_D_XELPD + PORT_D),
phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
enum tc_port tc_port = intel_port_to_tc(display, port);
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %s%c/PHY %s%c",
port >= PORT_TC1 ? "TC" : "",
port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum tc_port tc_port = intel_port_to_tc(display, port);
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c%s/PHY %s%c",
port_name(port),
@@ -5182,7 +5157,7 @@ void intel_ddi_init(struct intel_display *display,
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
} else {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c/PHY %c", port_name(port), phy_name(phy));
}
@@ -5218,32 +5193,32 @@ void intel_ddi_init(struct intel_display *display,
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable;
encoder->disable_clock = intel_mtl_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
encoder->get_config = mtl_ddi_get_config;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->enable_clock = intel_mpllb_enable;
encoder->disable_clock = intel_mpllb_disable;
encoder->get_config = dg2_ddi_get_config;
- } else if (IS_ALDERLAKE_S(dev_priv)) {
+ } else if (display->platform.alderlake_s) {
encoder->enable_clock = adls_ddi_enable_clock;
encoder->disable_clock = adls_ddi_disable_clock;
encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
encoder->get_config = adls_ddi_get_config;
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (display->platform.rocketlake) {
encoder->enable_clock = rkl_ddi_enable_clock;
encoder->disable_clock = rkl_ddi_disable_clock;
encoder->is_clock_enabled = rkl_ddi_is_clock_enabled;
encoder->get_config = rkl_ddi_get_config;
- } else if (IS_DG1(dev_priv)) {
+ } else if (display->platform.dg1) {
encoder->enable_clock = dg1_ddi_enable_clock;
encoder->disable_clock = dg1_ddi_disable_clock;
encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
encoder->get_config = dg1_ddi_get_config;
- } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (display->platform.jasperlake || display->platform.elkhartlake) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
@@ -5255,8 +5230,8 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (DISPLAY_VER(display) >= 11) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = icl_ddi_tc_enable_clock;
encoder->disable_clock = icl_ddi_tc_disable_clock;
encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
@@ -5268,36 +5243,36 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
/* BXT/GLK have fixed PLL->port mapping */
encoder->get_config = bxt_ddi_get_config;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
encoder->enable_clock = skl_ddi_enable_clock;
encoder->disable_clock = skl_ddi_disable_clock;
encoder->is_clock_enabled = skl_ddi_is_clock_enabled;
encoder->get_config = skl_ddi_get_config;
- } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ } else if (display->platform.broadwell || display->platform.haswell) {
encoder->enable_clock = hsw_ddi_enable_clock;
encoder->disable_clock = hsw_ddi_disable_clock;
encoder->is_clock_enabled = hsw_ddi_is_clock_enabled;
encoder->get_config = hsw_ddi_get_config;
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels;
} else {
encoder->set_signal_levels = hsw_set_signal_levels;
@@ -5305,29 +5280,29 @@ void intel_ddi_init(struct intel_display *display,
intel_ddi_buf_trans_init(encoder);
- if (DISPLAY_VER(dev_priv) >= 13)
- encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
- else if (IS_DG1(dev_priv))
- encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);
- else if (IS_ROCKETLAKE(dev_priv))
- encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) >= 12)
- encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
- encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 11)
- encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
+ if (DISPLAY_VER(display) >= 13)
+ encoder->hpd_pin = xelpd_hpd_pin(display, port);
+ else if (display->platform.dg1)
+ encoder->hpd_pin = dg1_hpd_pin(display, port);
+ else if (display->platform.rocketlake)
+ encoder->hpd_pin = rkl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) >= 12)
+ encoder->hpd_pin = tgl_hpd_pin(display, port);
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
+ encoder->hpd_pin = ehl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 11)
+ encoder->hpd_pin = icl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
+ encoder->hpd_pin = skl_hpd_pin(display, port);
else
encoder->hpd_pin = intel_hpd_pin_default(port);
- ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ ddi_buf_ctl = intel_de_read(display, DDI_BUF_CTL(port));
dig_port->lane_reversal = intel_bios_encoder_lane_reversal(devdata) ||
ddi_buf_ctl & DDI_BUF_PORT_REVERSAL;
- dig_port->ddi_a_4_lanes = DISPLAY_VER(dev_priv) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
+ dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
@@ -5346,7 +5321,7 @@ void intel_ddi_init(struct intel_display *display,
if (!is_legacy && init_hdmi) {
is_legacy = !init_dp;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n",
port_name(port),
str_yes_no(init_dp),
@@ -5363,24 +5338,24 @@ void intel_ddi_init(struct intel_display *display,
goto err;
}
- drm_WARN_ON(&dev_priv->drm, port > PORT_I);
+ drm_WARN_ON(display->drm, port > PORT_I);
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(display, port);
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_tc(encoder))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
dig_port->connected = bdw_digital_port_connected;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
if (port == PORT_A)
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_HASWELL(dev_priv)) {
+ } else if (display->platform.haswell) {
if (port == PORT_A)
dig_port->connected = hsw_digital_port_connected;
else
@@ -5396,7 +5371,7 @@ void intel_ddi_init(struct intel_display *display,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (dig_port->dp.mso_link_count)
- encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(display);
}
/*
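
Throughout intel_ddi.c the IS_<PLATFORM>(dev_priv) macros are replaced by display->platform.<name> bitfield tests. An abridged sketch of that structure (member layout assumed; the real definition lives in intel_display_device.h):

	struct intel_display_platforms {
		bool broxton:1;
		bool geminilake:1;
		bool haswell:1;
		bool broadwell:1;
		bool dg1:1;
		bool dg2:1;
		/* ... one flag per supported platform ... */
	};

so a check such as display->platform.geminilake is a plain bitfield test against the display device rather than a query on the full i915 device.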
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index b7399e9d11cc..655467a6ba87 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -181,20 +181,18 @@ intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
}
static inline int
-__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
{
return intel_de_wait(display, reg, mask, mask, timeout);
}
-#define intel_de_wait_for_set(p,...) __intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
{
return intel_de_wait(display, reg, mask, 0, timeout);
}
-#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__)
/*
* Unlocked mmio-accessors, think carefully before using these.
@@ -205,7 +203,7 @@ __intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
* a more localised lock guarding all access to that bank of registers.
*/
static inline u32
-__intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
+intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -214,15 +212,13 @@ __intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
{
trace_i915_reg_rw(true, reg, val, sizeof(val), true);
intel_uncore_write_fw(__to_uncore(display), reg, val);
}
-#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__)
static inline u32
intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
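
With the __to_intel_display() trampoline macros removed, these accessors are plain inline functions again, and callers must pass a struct intel_display * explicitly. A hypothetical caller, with the register and bit names invented for illustration:

	static int wait_for_pll_lock(struct intel_display *display)
	{
		/*
		 * Wait up to 5 units (ms, assuming intel_de_wait()'s
		 * timeout convention) for EXAMPLE_PLL_LOCK to assert.
		 */
		return intel_de_wait_for_set(display, EXAMPLE_PLL_ENABLE,
					     EXAMPLE_PLL_LOCK, 5);
	}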
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3afb85fe8536..db524d01e574 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -73,6 +73,7 @@
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -663,7 +664,6 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -696,7 +696,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(display) &&
- intel_set_memory_cxsr(dev_priv, false))
+ intel_set_memory_cxsr(display, false))
intel_plane_initial_vblank_wait(crtc);
/*
@@ -968,7 +968,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
- old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
+ old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
+ old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
+ old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
}
static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
@@ -1048,12 +1050,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
- intel_psr_post_plane_update(state, crtc);
-
intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_fbc_post_update(state, crtc);
@@ -1078,6 +1078,8 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
+
+ intel_psr_post_plane_update(state, crtc);
}
static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
@@ -1166,13 +1168,14 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
+ intel_psr_pre_plane_update(state, crtc);
+
if (intel_crtc_vrr_disabling(state, crtc)) {
intel_vrr_disable(old_crtc_state);
intel_crtc_update_active_timings(old_crtc_state, false);
@@ -1183,8 +1186,6 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_drrs_deactivate(old_crtc_state);
- intel_psr_pre_plane_update(state, crtc);
-
if (hsw_ips_pre_update(state, crtc))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1220,7 +1221,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(display) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1231,7 +1232,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
+ new_crtc_state->disable_cxsr && ilk_disable_cxsr(display))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1255,7 +1256,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*/
if (!intel_initial_watermarks(state, crtc))
if (new_crtc_state->update_wm_pre)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
}
/*
@@ -1660,13 +1661,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_pll_enable(state, crtc);
- for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
- const struct intel_crtc_state *pipe_crtc_state =
- intel_atomic_get_new_crtc_state(state, pipe_crtc);
-
- if (pipe_crtc_state->shared_dpll)
- intel_enable_shared_dpll(pipe_crtc_state);
- }
+ if (new_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(new_crtc_state);
intel_encoders_pre_enable(state, crtc);
@@ -1777,8 +1773,6 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_set_pch_fifo_underrun_reporting(display, pipe, true);
-
- intel_disable_shared_dpll(old_crtc_state);
}
static void hsw_crtc_disable(struct intel_atomic_state *state,
@@ -1797,12 +1791,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
- for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
- const struct intel_crtc_state *old_pipe_crtc_state =
- intel_atomic_get_old_crtc_state(state, pipe_crtc);
-
- intel_disable_shared_dpll(old_pipe_crtc_state);
- }
+ intel_disable_shared_dpll(old_crtc_state);
intel_encoders_post_pll_disable(state, crtc);
@@ -2081,7 +2070,6 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (drm_WARN_ON(display->drm, crtc->active))
@@ -2105,7 +2093,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_color_modeset(new_crtc_state);
if (!intel_initial_watermarks(state, crtc))
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
@@ -2121,7 +2109,6 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
@@ -2145,9 +2132,9 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (display->platform.cherryview)
- chv_disable_pll(dev_priv, pipe);
+ chv_disable_pll(display, pipe);
else if (display->platform.valleyview)
- vlv_disable_pll(dev_priv, pipe);
+ vlv_disable_pll(display, pipe);
else
i9xx_disable_pll(old_crtc_state);
}
@@ -2158,7 +2145,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
if (!display->funcs.wm->initial_watermarks)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
/* clock the pipe down to 640x480@60 to potentially save power */
if (display->platform.i830)
@@ -2341,7 +2328,6 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
intel_joiner_compute_pipe_src(crtc_state);
@@ -2360,7 +2346,7 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(i915)) {
+ intel_is_dual_link_lvds(display)) {
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
crtc->base.base.id, crtc->base.name);
@@ -2637,6 +2623,15 @@ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
PIPE_LINK_N2(display, transcoder));
}
+static bool
+transcoder_has_vrr(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ return HAS_VRR(display) && !transcoder_is_dsi(cpu_transcoder);
+}
+
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -2701,6 +2696,15 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
+ /*
+ * For platforms that always use the VRR Timing Generator, the
+ * VTOTAL.Vtotal bits are not required. Since support for these bits
+ * is going to be deprecated in upcoming platforms, avoid writing
+ * them on platforms that do not use the legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
@@ -2762,12 +2766,24 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
/*
+ * For platforms that always use the VRR Timing Generator, the
+ * VTOTAL.Vtotal bits are not required. Since support for these bits
+ * is going to be deprecated in upcoming platforms, avoid writing
+ * them on platforms that do not use the legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
+ /*
* The double buffer latch point for TRANS_VTOTAL
* is the transcoder's undelayed vblank.
*/
intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
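
Both hunks above force crtc_vtotal to 1 before the register write. Since the hardware field is programmed as VTOTAL(crtc_vtotal - 1), that writes a raw 0 into the deprecated Vtotal bits while VACTIVE is still programmed from the real vdisplay; condensed:

	if (intel_vrr_always_use_vrr_tg(display))
		crtc_vtotal = 1;

	/* Vtotal field receives 1 - 1 = 0 on VRR-TG-only platforms */
	intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));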
@@ -3833,7 +3849,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_display_power_domain_set *power_domain_set)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder;
enum port port;
u32 tmp;
@@ -3855,7 +3870,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
* registers/MIPI[BXT]. We can break out here early, since we
* need the same DSI PLL to be enabled for both DSI ports.
*/
- if (!bxt_dsi_pll_is_enabled(dev_priv))
+ if (!bxt_dsi_pll_is_enabled(display))
break;
/* XXX: this works for video mode only */
@@ -3918,7 +3933,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
- if (HAS_VRR(display) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ if (transcoder_has_vrr(pipe_config))
intel_vrr_get_config(pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -4145,8 +4160,6 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
int linetime_wm;
@@ -4159,7 +4172,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
/* Display WA #1135: BXT:ALL GLK:ALL */
if ((display->platform.geminilake || display->platform.broxton) &&
- skl_watermark_ipc_enabled(dev_priv))
+ skl_watermark_ipc_enabled(display))
linetime_wm /= 2;
return min(linetime_wm, 0x1ff);
@@ -5385,8 +5398,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.vmin);
PIPE_CONF_CHECK_I(vrr.vmax);
PIPE_CONF_CHECK_I(vrr.flipline);
- PIPE_CONF_CHECK_I(vrr.pipeline_full);
- PIPE_CONF_CHECK_I(vrr.guardband);
PIPE_CONF_CHECK_I(vrr.vsync_start);
PIPE_CONF_CHECK_I(vrr.vsync_end);
PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
@@ -5394,6 +5405,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(cmrr.enable);
}
+ if (!fastset || intel_vrr_always_use_vrr_tg(display)) {
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+ PIPE_CONF_CHECK_I(vrr.guardband);
+ }
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
@@ -6427,7 +6443,7 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- ret = intel_bw_atomic_check(state);
+ ret = intel_bw_atomic_check(state, any_ms);
if (ret)
goto fail;
@@ -7229,7 +7245,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
@@ -7443,7 +7459,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* toggling overhead at and above 60 FPS.
*/
intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -7515,10 +7531,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
{
struct intel_display *display = to_intel_display(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ state->wakeref = intel_display_rpm_get(display);
/*
* The intel_legacy_cursor_update() fast path takes care
@@ -7552,7 +7567,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_dbg_atomic(display->drm,
"Preparing state failed with %i\n", ret);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -7562,7 +7577,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_atomic_helper_unprepare_planes(dev, &state->base);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -7670,7 +7685,7 @@ void intel_setup_outputs(struct intel_display *display)
intel_bios_for_each_encoder(display, intel_ddi_init);
if (display->platform.geminilake || display->platform.broxton)
- vlv_dsi_init(dev_priv);
+ vlv_dsi_init(display);
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
@@ -7679,7 +7694,7 @@ void intel_setup_outputs(struct intel_display *display)
* to prevent the registration of both eDP and LVDS and the
* incorrect sharing of the PPS.
*/
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
@@ -7754,15 +7769,15 @@ void intel_setup_outputs(struct intel_display *display)
g4x_hdmi_init(display, CHV_HDMID, PORT_D);
}
- vlv_dsi_init(dev_priv);
+ vlv_dsi_init(display);
} else if (display->platform.pineview) {
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
} else if (IS_DISPLAY_VER(display, 3, 4)) {
bool found = false;
if (display->platform.mobile)
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
@@ -7804,10 +7819,10 @@ void intel_setup_outputs(struct intel_display *display)
intel_tv_init(display);
} else if (DISPLAY_VER(display) == 2) {
if (display->platform.i85x)
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
- intel_dvo_init(dev_priv);
+ intel_dvo_init(display);
}
for_each_intel_encoder(display->drm, encoder) {
@@ -7817,7 +7832,7 @@ void intel_setup_outputs(struct intel_display *display)
intel_encoder_possible_clones(encoder);
}
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
drm_helper_move_panel_connectors_to_head(display->drm);
}
@@ -8081,6 +8096,9 @@ retry:
goto out;
}
+ if (!crtc_state->hw.active)
+ crtc_state->inherited = false;
+
if (crtc_state->hw.active) {
struct intel_encoder *encoder;
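
intel_display.c now goes through display-level runtime-PM wrappers from intel_display_rpm.h instead of touching dev_priv->runtime_pm directly. A minimal sketch of the get/put pairing as used in intel_atomic_commit() above, with the hardware access elided:

	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);
	/* ... display hardware access ... */
	intel_display_rpm_put(display, wakeref);

The scoped form, with_intel_display_rpm(display), appears in the debugfs conversion further below.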
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index eeb7ae3eaea8..eb6d6f2d0f75 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -80,7 +80,7 @@ struct intel_display_funcs {
/* functions used for watermark calcs for display. */
struct intel_wm_funcs {
/* update_wm is for legacy wm management */
- void (*update_wm)(struct drm_i915_private *dev_priv);
+ void (*update_wm)(struct intel_display *display);
int (*compute_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*initial_watermarks)(struct intel_atomic_state *state,
@@ -90,8 +90,8 @@ struct intel_wm_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
- void (*get_hw_state)(struct drm_i915_private *i915);
- void (*sanitize)(struct drm_i915_private *i915);
+ void (*get_hw_state)(struct intel_display *display);
+ void (*sanitize)(struct intel_display *display);
};
struct intel_audio_state {
@@ -160,6 +160,7 @@ struct intel_hotplug {
struct {
unsigned long last_jiffies;
int count;
+ int blocked_count;
enum {
HPD_ENABLED = 0,
HPD_DISABLED = 1,
@@ -170,8 +171,8 @@ struct intel_hotplug {
u32 retry_bits;
struct delayed_work reenable_work;
- u32 long_port_mask;
- u32 short_port_mask;
+ u32 long_hpd_pin_mask;
+ u32 short_hpd_pin_mask;
struct work_struct dig_port_work;
struct work_struct poll_init_work;
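
The watermark hooks in struct intel_wm_funcs now take struct intel_display *, which is what lets call sites such as intel_update_watermarks(display) in the previous file compile. A sketch of the implied wrapper (assumed shape, not shown in this patch):

	static inline void intel_update_watermarks(struct intel_display *display)
	{
		/* legacy path; newer platforms use compute/optimize hooks */
		if (display->funcs.wm->update_wm)
			display->funcs.wm->update_wm(display);
	}

The long_hpd_pin_mask/short_hpd_pin_mask renames follow the same theme: per the new names, these are masks of enum hpd_pin values rather than of ports.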
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index fdedf65bee53..4c208fdb9137 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -24,6 +24,7 @@
#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -52,8 +53,11 @@ static struct intel_display *node_to_intel_display(struct drm_info_node *node)
static int intel_display_caps(struct seq_file *m, void *data)
{
struct intel_display *display = node_to_intel_display(m->private);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_printer p = drm_seq_file_printer(m);
+ drm_printf(&p, "PCH type: %d\n", INTEL_PCH_TYPE(i915));
+
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
intel_display_params_dump(&display->params, display->drm->driver->name, &p);
@@ -580,13 +584,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
static int i915_display_info(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
drm_modeset_lock_all(display->drm);
@@ -605,18 +608,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_modeset_unlock_all(display->drm);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_display_capabilities(struct seq_file *m, void *unused)
-{
- struct intel_display *display = node_to_intel_display(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- intel_display_device_info_print(DISPLAY_INFO(display),
- DISPLAY_RUNTIME_INFO(display), &p);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -690,14 +682,11 @@ static bool
intel_lpsp_power_well_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- intel_wakeref_t wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- is_enabled = intel_display_power_well_is_enabled(display,
- power_well_id);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ is_enabled = intel_display_power_well_is_enabled(display,
+ power_well_id);
return is_enabled;
}
@@ -820,7 +809,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
- {"i915_display_capabilities", i915_display_capabilities, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
@@ -829,7 +817,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
void intel_display_debugfs_register(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
@@ -844,10 +831,10 @@ void intel_display_debugfs_register(struct intel_display *display)
intel_dmc_debugfs_register(display);
intel_dp_test_debugfs_register(display);
intel_fbc_debugfs_register(display);
- intel_hpd_debugfs_register(i915);
+ intel_hpd_debugfs_register(display);
intel_opregion_debugfs_register(display);
intel_psr_debugfs_register(display);
- intel_wm_debugfs_register(i915);
+ intel_wm_debugfs_register(display);
intel_display_debugfs_params(display);
}
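
intel_lpsp_power_well_enabled() above demonstrates the new scoped RPM guard. A hedged reading of the construct, assuming it is a for-loop-style scope macro like the kernel's other guard helpers:

	bool is_enabled;

	/* holds a display RPM reference for exactly the statement body */
	with_intel_display_rpm(display)
		is_enabled = intel_display_power_well_is_enabled(display,
								 power_well_id);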
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 717286981687..368b0d3417c2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -143,9 +143,11 @@ struct intel_display_platforms {
#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
+#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_CMTG(__display) (!(__display)->platform.dg2 && DISPLAY_VER(__display) >= 13)
#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
@@ -156,9 +158,9 @@ struct intel_display_platforms {
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
-#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb)
#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
#define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
@@ -166,9 +168,10 @@ struct intel_display_platforms {
#define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
-#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
+#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
#define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell)
@@ -189,10 +192,7 @@ struct intel_display_platforms {
((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
HAS_DSC(__display))
#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
-#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
-#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
-#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
#define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv)
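
Most of the churn in this header is alphabetical reordering of the HAS_*() macros (HAS_AS_SDP, HAS_CMRR, HAS_DP_MST and HAS_GMBUS_IRQ keep their definitions); the one rename is I915_HAS_HOTPLUG() to HAS_HOTPLUG(). A hypothetical call-site update, invented for illustration and not shown in this excerpt:

	-	if (I915_HAS_HOTPLUG(display))
	+	if (HAS_HOTPLUG(display))
			example_hotplug_setup(display);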
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 31740a677dd8..efee8925987e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -82,7 +82,6 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
void intel_display_driver_init_hw(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state;
if (!HAS_DISPLAY(display))
@@ -94,7 +93,7 @@ void intel_display_driver_init_hw(struct intel_display *display)
intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
- intel_display_wa_apply(i915);
+ intel_display_wa_apply(display);
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
@@ -181,8 +180,6 @@ static void intel_plane_possible_crtcs_init(struct intel_display *display)
void intel_display_driver_early_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!HAS_DISPLAY(display))
return;
@@ -193,12 +190,12 @@ void intel_display_driver_early_probe(struct intel_display *display)
mutex_init(&display->pps.mutex);
mutex_init(&display->hdcp.hdcp_mutex);
- intel_display_irq_init(i915);
+ intel_display_irq_init(display);
intel_dkl_phy_init(display);
intel_color_init_hooks(display);
intel_init_cdclk_hooks(display);
intel_audio_hooks_init(display);
- intel_dpll_init_clock_hook(i915);
+ intel_dpll_init_clock_hook(display);
intel_init_display_hooks(display);
intel_fdi_init_hook(display);
intel_dmc_wl_init(display);
@@ -255,11 +252,11 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- ret = intel_dbuf_init(i915);
+ ret = intel_dbuf_init(display);
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- ret = intel_bw_init(i915);
+ ret = intel_bw_init(display);
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
@@ -315,11 +312,9 @@ static void set_display_access(struct intel_display *display,
*/
void intel_display_driver_enable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
set_display_access(display, true, NULL);
- intel_hpd_enable_detection_work(i915);
+ intel_hpd_enable_detection_work(display);
}
/**
@@ -341,9 +336,7 @@ void intel_display_driver_enable_user_access(struct intel_display *display)
*/
void intel_display_driver_disable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_hpd_disable_detection_work(i915);
+ intel_hpd_disable_detection_work(display);
set_display_access(display, false, current);
}
@@ -429,7 +422,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
if (!HAS_DISPLAY(display))
return 0;
- intel_wm_init(i915);
+ intel_wm_init(display);
intel_panel_sanitize_ssc(display);
@@ -483,7 +476,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(display))
- ilk_wm_sanitize(i915);
+ ilk_wm_sanitize(display);
return 0;
@@ -498,7 +491,6 @@ err_mode_config:
/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (!HAS_DISPLAY(display))
@@ -524,9 +516,9 @@ int intel_display_driver_probe(struct intel_display *display)
intel_overlay_setup(display);
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(i915);
+ intel_hpd_init(display);
- skl_watermark_ipc_init(i915);
+ skl_watermark_ipc_init(display);
return 0;
}
@@ -558,7 +550,7 @@ void intel_display_driver_register(struct intel_display *display)
* fbdev->async_cookie.
*/
drm_kms_helper_poll_init(display->drm);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
intel_fbdev_setup(i915);
@@ -600,7 +592,7 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
* Due to the hpd irq storm handling, the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- intel_hpd_poll_fini(i915);
+ intel_hpd_poll_fini(display);
intel_unregister_dsm_handler();
@@ -733,7 +725,6 @@ __intel_display_driver_resume(struct intel_display *display,
void intel_display_driver_resume(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_atomic_state *state = display->restore.modeset_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -761,7 +752,7 @@ void intel_display_driver_resume(struct intel_display *display)
if (!ret)
ret = __intel_display_driver_resume(display, state, &ctx);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
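
The conversions in this file all follow one mechanical recipe: the function takes struct intel_display * instead of struct drm_i915_private *, callers pass the display pointer straight through, and anything that still lives in the old private structure is derived locally. A minimal sketch of that recipe, assuming to_i915() and the irq_lock placement shown in the hunks below (example_irq_helper is a hypothetical name):

static void example_irq_helper(struct intel_display *display, u32 bits)
{
	/* transitional: the irq_lock still lives in drm_i915_private */
	struct drm_i915_private *i915 = to_i915(display->drm);

	lockdep_assert_held(&i915->irq_lock);

	/* register I/O already goes through the display pointer */
	intel_de_write(display, DEIMR, bits);
	intel_de_posting_read(display, DEIMR);
}
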
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index aa23bb817805..d2a35e3630b1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -14,6 +14,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dmc_wl.h"
@@ -115,9 +116,8 @@ static void intel_pipe_fault_irq_handler(struct intel_display *display,
}
static void
-intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_handle_vblank(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
drm_crtc_handle_vblank(&crtc->base);
@@ -125,59 +125,59 @@ intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
/**
* ilk_update_display_irq - update DEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
new_val = dev_priv->irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->irq_mask &&
- !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
+ !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
intel_de_write(display, DEIMR, dev_priv->irq_mask);
intel_de_posting_read(display, DEIMR);
}
}
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_enable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, bits);
+ ilk_update_display_irq(display, bits, bits);
}
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_disable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, 0);
+ ilk_update_display_irq(display, bits, 0);
}
/**
* bdw_update_port_irq - update DE port interrupt
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void bdw_update_port_irq(struct drm_i915_private *dev_priv,
+void bdw_update_port_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
u32 old_val;
lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
@@ -194,83 +194,83 @@ void bdw_update_port_irq(struct drm_i915_private *dev_priv,
/**
* bdw_update_pipe_irq - update DE pipe interrupt
- * @dev_priv: driver private
+ * @display: display device
* @pipe: pipe whose interrupt to update
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+static void bdw_update_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 interrupt_mask,
u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = dev_priv->display.irq.de_irq_mask[pipe];
+ new_val = display->irq.de_irq_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
- dev_priv->display.irq.de_irq_mask[pipe] = new_val;
+ if (new_val != display->irq.de_irq_mask[pipe]) {
+ display->irq.de_irq_mask[pipe] = new_val;
intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
-void bdw_enable_pipe_irq(struct drm_i915_private *i915,
+void bdw_enable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, bits);
+ bdw_update_pipe_irq(display, pipe, bits, bits);
}
-void bdw_disable_pipe_irq(struct drm_i915_private *i915,
+void bdw_disable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, 0);
+ bdw_update_pipe_irq(display, pipe, bits, 0);
}
/**
* ibx_display_interrupt_update - update SDEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 sdeimr = intel_de_read(display, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
intel_de_write(display, SDEIMR, sdeimr);
intel_de_posting_read(display, SDEIMR);
}
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, bits);
+ ibx_display_interrupt_update(display, bits, bits);
}
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, 0);
+ ibx_display_interrupt_update(display, bits, 0);
}
u32 i915_pipestat_enable_mask(struct intel_display *display,
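
The masking arithmetic in ilk_update_display_irq() above (and in bdw_update_pipe_irq() below) is easier to see in isolation. In these IMR-style registers a set bit masks, i.e. disables, the interrupt; the update touches only the bits named in interrupt_mask, clears the ones also present in enabled_irq_mask, and sets the rest. A self-contained sketch of the rule (example_update_imr is a hypothetical name):

static u32 example_update_imr(u32 old_imr, u32 interrupt_mask,
			      u32 enabled_irq_mask)
{
	u32 new_imr = old_imr;

	/* forget the previous state of the bits being updated */
	new_imr &= ~interrupt_mask;
	/* of those bits, mask every one that is not being enabled */
	new_imr |= ~enabled_irq_mask & interrupt_mask;

	return new_imr;
}

For instance, example_update_imr(0xf, 0x6, 0x2) returns 0xd: bit 1 is unmasked, bit 2 stays masked, and bits outside interrupt_mask are untouched.
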
@@ -318,48 +318,48 @@ out:
return enable_mask;
}
-void i915_enable_pipestat(struct drm_i915_private *dev_priv,
+void i915_enable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- struct intel_display *display = &dev_priv->display;
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
+ display->irq.pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
intel_de_write(display, reg, enable_mask | status_mask);
intel_de_posting_read(display, reg);
}
-void i915_disable_pipestat(struct drm_i915_private *dev_priv,
+void i915_disable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- struct intel_display *display = &dev_priv->display;
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
+ display->irq.pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
intel_de_write(display, reg, enable_mask | status_mask);
@@ -368,24 +368,22 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_I85X(i915))
+ if (display->platform.i85x)
return true;
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
return true;
- return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
+ return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile;
}
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
- * @dev_priv: i915 device private
+ * @display: display device
*/
-void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
+void i915_enable_asle_pipestat(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
if (!intel_opregion_asle_present(display))
return;
@@ -395,22 +393,21 @@ void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
- if (DISPLAY_VER(dev_priv) >= 4)
- i915_enable_pipestat(dev_priv, PIPE_A,
+ i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
+ if (DISPLAY_VER(display) >= 4)
+ i915_enable_pipestat(display, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
spin_unlock_irq(&dev_priv->irq_lock);
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
@@ -427,7 +424,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* don't trust that one either.
*/
if (pipe_crc->skipped <= 0 ||
- (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
return;
@@ -440,20 +437,19 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
#else
static inline void
-display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4) {}
#endif
-static void flip_done_handler(struct drm_i915_private *i915,
+static void flip_done_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
- spin_lock(&i915->drm.event_lock);
+ spin_lock(&display->drm->event_lock);
if (crtc->flip_done_event) {
trace_intel_crtc_flip_done(crtc);
@@ -461,25 +457,21 @@ static void flip_done_handler(struct drm_i915_private *i915,
crtc->flip_done_event = NULL;
}
- spin_unlock(&i915->drm.event_lock);
+ spin_unlock(&display->drm->event_lock);
}
-static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void hsw_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
-
- display_pipe_crc_irq_handler(dev_priv, pipe,
+ display_pipe_crc_irq_handler(display, pipe,
intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
0, 0, 0, 0);
}
-static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void ivb_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
-
- display_pipe_crc_irq_handler(dev_priv, pipe,
+ display_pipe_crc_irq_handler(display, pipe,
intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
@@ -487,58 +479,56 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
}
-static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 res1, res2;
- if (DISPLAY_VER(dev_priv) >= 3)
- res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 3)
+ res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe));
else
res1 = 0;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe));
else
res2 = 0;
- display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)),
- intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
- intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
+ display_pipe_crc_irq_handler(display, pipe,
+ intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)),
res1, res2);
}
-static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
+static void i9xx_pipestat_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
intel_de_write(display,
- PIPESTAT(dev_priv, pipe),
+ PIPESTAT(display, pipe),
PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);
- dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
+ display->irq.pipestat_irq_mask[pipe] = 0;
}
}
-void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+void i9xx_pipestat_irq_ack(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
spin_lock(&dev_priv->irq_lock);
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->display.irq.vlv_display_irqs_enabled) {
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled) {
spin_unlock(&dev_priv->irq_lock);
return;
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
i915_reg_t reg;
u32 status_mask, enable_mask, iir_bit = 0;
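
The PIPESTAT helpers above rely on the register's split layout: the high half holds the interrupt enables and the low half holds sticky status bits that are cleared by writing 1 back, which is why a single write of enable_mask | status_mask both keeps the enables armed and acks the pending events. A sketch of an ack under those assumptions (example_ack_pipestat is a hypothetical name):

static void example_ack_pipestat(struct intel_display *display,
				 enum pipe pipe, u32 status)
{
	i915_reg_t reg = PIPESTAT(display, pipe);
	u32 enable_mask = i915_pipestat_enable_mask(display, pipe);

	/* write-1-to-clear the status bits, keep the enables set */
	intel_de_write(display, reg, enable_mask | status);
	intel_de_posting_read(display, reg);
}
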
@@ -566,12 +556,12 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
+ status_mask |= display->irq.pipestat_irq_mask[pipe];
if (!status_mask)
continue;
- reg = PIPESTAT(dev_priv, pipe);
+ reg = PIPESTAT(display, pipe);
pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
@@ -592,22 +582,21 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->irq_lock);
}
-void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i915_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -617,22 +606,21 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(display);
}
-void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i965_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -645,21 +633,20 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void valleyview_pipestat_irq_handler(struct intel_display *display,
u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -669,18 +656,17 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void ibx_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
+ drm_dbg(display->drm, "PCH audio power change on port %d\n",
port_name(port));
}
@@ -691,26 +677,26 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
- drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
+ drm_dbg(display->drm, "PCH HDCP audio interrupt\n");
if (pch_iir & SDE_AUDIO_TRANS_MASK)
- drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder audio interrupt\n");
if (pch_iir & SDE_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
if (pch_iir & SDE_FDI_MASK) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
- drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n");
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
@@ -753,14 +739,13 @@ static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
{}
};
-static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
+static void ivb_err_int_handler(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 err_int = intel_de_read(display, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
if (err_int & ERR_INT_INVALID_GTT_PTE)
drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");
@@ -768,17 +753,17 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
if (err_int & ERR_INT_INVALID_PTE_DATA)
drm_err_ratelimited(display->drm, "Invalid PTE data\n");
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
- if (IS_IVYBRIDGE(dev_priv))
- ivb_pipe_crc_irq_handler(dev_priv, pipe);
+ if (display->platform.ivybridge)
+ ivb_pipe_crc_irq_handler(display, pipe);
else
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
}
fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
@@ -790,34 +775,32 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
intel_de_write(display, GEN7_ERR_INT, err_int);
}
-static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
+static void cpt_serr_int_handler(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 serr_int = intel_de_read(display, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
intel_pch_fifo_underrun_irq_handler(display, pipe);
intel_de_write(display, SERR_INT, serr_int);
}
-static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void cpt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
+ drm_dbg(display->drm, "PCH audio power change on port %c\n",
port_name(port));
}
@@ -828,20 +811,20 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
+ drm_dbg(display->drm, "Audio CP request interrupt\n");
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
+ drm_dbg(display->drm, "Audio CP change interrupt\n");
if (pch_iir & SDE_FDI_MASK_CPT) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
- cpt_serr_int_handler(dev_priv);
+ cpt_serr_int_handler(display);
}
static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
@@ -894,14 +877,14 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display)
}
}
-void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
intel_dp_aux_irq_handler(display);
@@ -910,23 +893,23 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
intel_opregion_asle_intr(display);
if (de_iir & DE_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
if (de_iir & DE_GTT_FAULT)
ilk_gtt_fault_irq_handler(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
}
/* check event from PCH */
@@ -934,34 +917,34 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
u32 pch_iir = intel_de_read(display, SDEIIR);
if (HAS_PCH_CPT(dev_priv))
- cpt_irq_handler(dev_priv, pch_iir);
+ cpt_irq_handler(display, pch_iir);
else
- ibx_irq_handler(dev_priv, pch_iir);
+ ibx_irq_handler(display, pch_iir);
/* should clear PCH hotplug event before clearing CPU irq */
intel_de_write(display, SDEIIR, pch_iir);
}
- if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
+ if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT)
gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}
-void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev_priv);
+ ivb_err_int_handler(display);
if (de_iir & DE_EDP_PSR_INT_HSW) {
struct intel_encoder *encoder;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 psr_iir;
@@ -977,35 +960,35 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
}
/* check event from PCH */
if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = intel_de_read(display, SDEIIR);
- cpt_irq_handler(dev_priv, pch_iir);
+ cpt_irq_handler(display, pch_iir);
/* clear PCH hotplug event before clearing CPU irq */
intel_de_write(display, SDEIIR, pch_iir);
}
}
-static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
u32 mask;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return 0;
- else if (DISPLAY_VER(dev_priv) >= 14)
+ else if (DISPLAY_VER(display) >= 14)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB;
- else if (DISPLAY_VER(dev_priv) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -1015,7 +998,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC2 |
TGL_DE_PORT_AUX_USBC3 |
TGL_DE_PORT_AUX_USBC4;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -1027,12 +1010,12 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC6;
mask = GEN8_AUX_CHANNEL_A;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (DISPLAY_VER(dev_priv) == 11) {
+ if (DISPLAY_VER(display) == 11) {
mask |= ICL_AUX_CHANNEL_F;
mask |= ICL_AUX_CHANNEL_E;
}
@@ -1040,10 +1023,8 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
return mask;
}
-static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
@@ -1195,15 +1176,14 @@ gen8_pipe_fault_handlers(struct intel_display *display)
return bdw_pipe_fault_handlers;
}
-static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
+static void intel_pmdemand_irq_handler(struct intel_display *display)
{
- wake_up_all(&dev_priv->display.pmdemand.waitqueue);
+ wake_up_all(&display->pmdemand.waitqueue);
}
static void
-gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &dev_priv->display;
bool found = false;
if (HAS_DBUF_OVERLAP_DETECTION(display)) {
@@ -1213,20 +1193,20 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (iir & (XELPDP_PMDEMAND_RSP |
XELPDP_PMDEMAND_RSPTOUT_ERR)) {
if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Error waiting for Punit PM Demand Response\n");
- intel_pmdemand_irq_handler(dev_priv);
+ intel_pmdemand_irq_handler(display);
found = true;
}
if (iir & XELPDP_RM_TIMEOUT) {
u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
- drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
+ drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
found = true;
}
} else if (iir & GEN8_DE_MISC_GSE) {
@@ -1239,12 +1219,12 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 psr_iir;
i915_reg_t iir_reg;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- iir_reg = TRANS_PSR_IIR(dev_priv,
- intel_dp->psr.transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ iir_reg = TRANS_PSR_IIR(display,
+ intel_dp->psr.transcoder);
else
iir_reg = EDP_PSR_IIR;
@@ -1256,19 +1236,18 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_psr_irq_handler(intel_dp, psr_iir);
/* prior to GEN12 there is only one EDP PSR */
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
break;
}
}
if (!found)
- drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
+ drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}
-static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
+static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
u32 te_trigger)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe = INVALID_PIPE;
enum transcoder dsi_trans;
enum port port;
@@ -1278,7 +1257,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
* In case of dual link, TE comes from DSI_1;
* this is to check if dual link is enabled
*/
- val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -1294,12 +1273,12 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
- drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
+ drm_err(display->drm, "DSI transcoder not configured in command mode\n");
return;
}
/* Get PIPE for handling VBLANK event */
- val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -1311,28 +1290,28 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
pipe = PIPE_C;
break;
default:
- drm_err(&dev_priv->drm, "Invalid PIPE\n");
+ drm_err(display->drm, "Invalid PIPE\n");
return;
}
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}
-static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return GEN9_PIPE_PLANE1_FLIP_DONE;
else
return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
-static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
+static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 pica_ier = 0;
*pica_iir = 0;
@@ -1346,7 +1325,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
* their flags both in the PICA and SDE IIR.
*/
if (*pch_iir & SDE_PICAINTERRUPT) {
- drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
+ drm_WARN_ON(display->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
@@ -1359,32 +1338,32 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}
-void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 iir;
enum pipe pipe;
- drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
+ drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = intel_de_read(display, GEN8_DE_MISC_IIR);
if (iir) {
intel_de_write(display, GEN8_DE_MISC_IIR, iir);
- gen8_de_misc_irq_handler(dev_priv, iir);
+ gen8_de_misc_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE MISC)!\n");
}
}
- if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
iir = intel_de_read(display, GEN11_DE_HPD_IIR);
if (iir) {
intel_de_write(display, GEN11_DE_HPD_IIR, iir);
- gen11_hpd_irq_handler(dev_priv, iir);
+ gen11_hpd_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied, (DE HPD)!\n");
}
}
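
gen8_de_irq_handler() repeats one dispatch shape per interrupt category: the master control register claims the category fired, so its IIR should be non-zero; the IIR is read, written back to ack, and only then decoded, while a zero IIR is merely logged as the master control having "lied". A condensed sketch of that shape (the example_* names are hypothetical):

static void example_handle_category(struct intel_display *display,
				    u32 master_ctl, u32 category_bit,
				    i915_reg_t iir_reg)
{
	u32 iir;

	if (!(master_ctl & category_bit))
		return;

	iir = intel_de_read(display, iir_reg);
	if (!iir) {
		drm_err_ratelimited(display->drm,
				    "The master control interrupt lied!\n");
		return;
	}

	/* ack before decoding so a new edge is not lost */
	intel_de_write(display, iir_reg, iir);

	/* ... decode iir and call the per-source handlers ... */
}
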
@@ -1396,52 +1375,52 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_de_write(display, GEN8_DE_PORT_IIR, iir);
- if (iir & gen8_de_port_aux_mask(dev_priv)) {
+ if (iir & gen8_de_port_aux_mask(display)) {
intel_dp_aux_irq_handler(display);
found = true;
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
+ bxt_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
}
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
(iir & BXT_DE_PORT_GMBUS)) {
intel_gmbus_irq_handler(display);
found = true;
}
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
if (te_trigger) {
- gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
+ gen11_dsi_te_interrupt_handler(display, te_trigger);
found = true;
}
}
if (!found)
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"Unexpected DE Port interrupt\n");
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE PORT)!\n");
}
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
@@ -1449,7 +1428,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE PIPE)!\n");
continue;
}
@@ -1457,29 +1436,29 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
- if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
- flip_done_handler(dev_priv, pipe);
+ if (iir & gen8_de_pipe_flip_done_mask(display))
+ flip_done_handler(display, pipe);
- if (HAS_DSB(dev_priv)) {
+ if (HAS_DSB(display)) {
if (iir & GEN12_DSB_INT(INTEL_DSB_0))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);
if (iir & GEN12_DSB_INT(INTEL_DSB_1))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);
if (iir & GEN12_DSB_INT(INTEL_DSB_2))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
}
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
- fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
+ fault_errors = iir & gen8_de_pipe_fault_mask(display);
if (fault_errors)
intel_pipe_fault_irq_handler(display,
gen8_pipe_fault_handlers(display),
@@ -1495,31 +1474,30 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
+ gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
if (iir) {
if (pica_iir)
- xelpdp_pica_irq_handler(dev_priv, pica_iir);
+ xelpdp_pica_irq_handler(display, pica_iir);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir);
+ icp_irq_handler(display, iir);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- spt_irq_handler(dev_priv, iir);
+ spt_irq_handler(display, iir);
else
- cpt_irq_handler(dev_priv, iir);
+ cpt_irq_handler(display, iir);
} else {
/*
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"The master control interrupt lied (SDE)!\n");
}
}
}
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
{
- struct intel_display *display = &i915->display;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -1532,20 +1510,17 @@ u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
return iir;
}
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
{
- struct intel_display *display = &i915->display;
-
if (iir & GEN11_GU_MISC_GSE)
intel_opregion_asle_intr(display);
}
-void gen11_display_irq_handler(struct drm_i915_private *i915)
+void gen11_display_irq_handler(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 disp_ctl;
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_block(display);
/*
* GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
* for the display-related bits.
@@ -1553,16 +1528,15 @@ void gen11_display_irq_handler(struct drm_i915_private *i915)
disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- gen8_de_irq_handler(i915, disp_ctl);
+ gen8_de_irq_handler(display, disp_ctl);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_unblock(display);
}
-static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
/*
* Vblank/CRC interrupts fail to wake the device up from C2+.
@@ -1570,41 +1544,41 @@ static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
* the problem. There is a small power cost so we do this
* only when vblank/CRC interrupts are actually enabled.
*/
- if (i915->display.irq.vblank_enabled++ == 0)
+ if (display->irq.vblank_enabled++ == 0)
intel_de_write(display, SCPD0,
_MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
- if (--i915->display.irq.vblank_enabled == 0)
+ if (--display->irq.vblank_enabled == 0)
intel_de_write(display, SCPD0,
_MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
{
- spin_lock_irq(&i915->drm.vblank_time_lock);
+ spin_lock_irq(&display->drm->vblank_time_lock);
if (enable)
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
else
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
- spin_unlock_irq(&i915->drm.vblank_time_lock);
+ spin_unlock_irq(&display->drm->vblank_time_lock);
}
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc->dev);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
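
SCPD0 above is a masked register: the upper 16 bits of a write select which of the lower 16 bits take effect, so the workaround can be flipped without a read-modify-write. Under that assumption the helpers used above expand roughly to the following (the EXAMPLE_ names are illustrative, not the driver's actual macro definitions):

/* select the bit in the high half and set it in the low half */
#define EXAMPLE_MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
/* select the bit in the high half and leave it cleared */
#define EXAMPLE_MASKED_BIT_DISABLE(a)	((a) << 16)
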
@@ -1612,41 +1586,43 @@ int i8xx_enable_vblank(struct drm_crtc *crtc)
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc->dev);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
return i8xx_enable_vblank(crtc);
}
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
i8xx_disable_vblank(crtc);
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
}
int i965_enable_vblank(struct drm_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc->dev);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
+ i915_enable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1655,32 +1631,34 @@ int i965_enable_vblank(struct drm_crtc *crtc)
void i965_disable_vblank(struct drm_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc->dev);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
+ i915_disable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
int ilk_enable_vblank(struct drm_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc->dev);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
+ ilk_enable_display_irq(display, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/* Even though there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(crtc);
return 0;
@@ -1688,14 +1666,15 @@ int ilk_enable_vblank(struct drm_crtc *crtc)
void ilk_disable_vblank(struct drm_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc->dev);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
+ ilk_disable_display_irq(display, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -1753,13 +1732,13 @@ int bdw_enable_vblank(struct drm_crtc *_crtc)
schedule_work(&display->irq.vblank_dc_work);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/* Even if there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated, so check only for PSR.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(&crtc->base);
return 0;
@@ -1777,7 +1756,7 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
return;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
@@ -1892,11 +1871,11 @@ void vlv_display_error_irq_handler(struct intel_display *display,
vlv_page_table_error_irq_handler(display, dpinvgtt);
}
-static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
@@ -1904,31 +1883,29 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(to_intel_uncore(display->drm),
VLV_ERROR_REGS);
- i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
+ i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
- i9xx_pipestat_irq_reset(dev_priv);
+ i9xx_pipestat_irq_reset(display);
intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
-void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_reset(struct intel_display *display)
{
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- _vlv_display_irq_reset(dev_priv);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_reset(display);
}
-void i9xx_display_irq_reset(struct drm_i915_private *i915)
+void i9xx_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (I915_HAS_HOTPLUG(i915)) {
- i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
- intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0);
+ if (HAS_HOTPLUG(display)) {
+ i915_hotplug_interrupt_update(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
}
- i9xx_pipestat_irq_reset(i915);
+ i9xx_pipestat_irq_reset(display);
}
static u32 vlv_error_mask(void)
@@ -1937,17 +1914,17 @@ static u32 vlv_error_mask(void)
return VLV_ERROR_PAGE_TABLE;
}
-void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+void vlv_display_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
+ if (!display->irq.vlv_display_irqs_enabled)
return;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_de_write(display, DPINVGTT,
DPINVGTT_STATUS_MASK_CHV |
DPINVGTT_EN_MASK_CHV);
@@ -1961,9 +1938,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(display, pipe)
+ i915_enable_pipestat(display, pipe, pipestat_mask);
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -1972,29 +1949,28 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
I915_LPE_PIPE_B_INTERRUPT |
I915_MASTER_ERROR_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask;
intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}
-void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
+void gen8_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
@@ -2003,22 +1979,22 @@ void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
}
-void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
+void gen11_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2026,10 +2002,10 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
continue;
intel_de_write(display,
- TRANS_PSR_IMR(dev_priv, trans),
+ TRANS_PSR_IMR(display, trans),
0xffffffff);
intel_de_write(display,
- TRANS_PSR_IIR(dev_priv, trans),
+ TRANS_PSR_IIR(display, trans),
0xffffffff);
}
} else {
@@ -2037,7 +2013,7 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
}
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
@@ -2045,7 +2021,7 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
else
intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
@@ -2054,12 +2030,12 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_post_enable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@@ -2069,18 +2045,18 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
+ display->irq.de_irq_mask[pipe],
+ ~display->irq.de_irq_mask[pipe] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_pre_disable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@@ -2090,7 +2066,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
spin_unlock_irq(&dev_priv->irq_lock);
@@ -2110,9 +2086,9 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
* to avoid races with the irq handler, assuming we have MSI. Shared legacy
* interrupts could still race.
*/
-static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ibx_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 mask;
if (HAS_PCH_NOP(dev_priv))
@@ -2128,40 +2104,45 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_enable_display_irqs(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
lockdep_assert_held(&dev_priv->irq_lock);
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
+ if (display->irq.vlv_display_irqs_enabled)
return;
- dev_priv->display.irq.vlv_display_irqs_enabled = true;
+ display->irq.vlv_display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
- _vlv_display_irq_reset(dev_priv);
- vlv_display_irq_postinstall(dev_priv);
+ _vlv_display_irq_reset(display);
+ vlv_display_irq_postinstall(display);
}
}
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_disable_display_irqs(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
lockdep_assert_held(&dev_priv->irq_lock);
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
+ if (!display->irq.vlv_display_irqs_enabled)
return;
- dev_priv->display.irq.vlv_display_irqs_enabled = false;
+ display->irq.vlv_display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- _vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(display);
}
-void ilk_de_irq_postinstall(struct drm_i915_private *i915)
+void ilk_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
u32 display_mask, extra_mask;
- if (DISPLAY_VER(i915) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
@@ -2182,59 +2163,59 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
DE_DP_A_HOTPLUG);
}
- if (IS_HASWELL(i915)) {
+ if (display->platform.haswell) {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
display_mask |= DE_EDP_PSR_INT_HSW;
}
- if (IS_IRONLAKE_M(i915))
+ if (display->platform.ironlake && display->platform.mobile)
extra_mask |= DE_PCU_EVENT;
i915->irq_mask = ~display_mask;
- ibx_irq_postinstall(i915);
+ ibx_irq_postinstall(display);
intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
display_mask | extra_mask);
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915);
-static void icp_irq_postinstall(struct drm_i915_private *i915);
+static void mtp_irq_postinstall(struct intel_display *display);
+static void icp_irq_postinstall(struct intel_display *display);
-void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen8_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+ u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) |
GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
- u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
+ u32 de_port_masked = gen8_de_port_aux_mask(display);
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VER(dev_priv) >= 14)
- mtp_irq_postinstall(dev_priv);
+ if (DISPLAY_VER(display) >= 14)
+ mtp_irq_postinstall(display);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev_priv);
+ icp_irq_postinstall(display);
else if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev_priv);
+ ibx_irq_postinstall(display);
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
de_misc_masked |= GEN8_DE_MISC_GSE;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_masked |= BXT_DE_PORT_GMBUS;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum port port;
if (intel_bios_is_dsi_present(display, &port))
@@ -2244,25 +2225,25 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_DBUF_OVERLAP_DETECTION(display))
de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;
- if (HAS_DSB(dev_priv))
+ if (HAS_DSB(display))
de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
GEN12_DSB_INT(INTEL_DSB_1) |
GEN12_DSB_INT(INTEL_DSB_2);
de_pipe_enables = de_pipe_masked |
GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
de_port_enables = de_port_masked;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
- else if (IS_BROADWELL(dev_priv))
+ else if (display->platform.broadwell)
de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2270,19 +2251,19 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
continue;
intel_display_irq_regs_assert_irr_is_zero(display,
- TRANS_PSR_IIR(dev_priv, trans));
+ TRANS_PSR_IIR(display, trans));
}
} else {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
}
- for_each_pipe(dev_priv, pipe) {
- dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
+ for_each_pipe(display, pipe) {
+ display->irq.de_irq_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
+ display->irq.de_irq_mask[pipe],
de_pipe_enables);
}
@@ -2291,7 +2272,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
de_misc_masked);
- if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
+ if (IS_DISPLAY_VER(display, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
@@ -2301,9 +2282,8 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915)
+static void mtp_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
@@ -2315,43 +2295,37 @@ static void mtp_irq_postinstall(struct drm_i915_private *i915)
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}
-static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+static void icp_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 mask = SDE_GMBUS_ICP;
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen11_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void dg1_de_irq_postinstall(struct drm_i915_private *i915)
+void dg1_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(i915);
+ gen8_de_irq_postinstall(display);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void intel_display_irq_init(struct drm_i915_private *i915)
+void intel_display_irq_init(struct intel_display *display)
{
- i915->drm.vblank_disable_immediate = true;
+ display->drm->vblank_disable_immediate = true;
- intel_hotplug_irq_init(i915);
+ intel_hotplug_irq_init(display);
- INIT_WORK(&i915->display.irq.vblank_dc_work,
- intel_display_vblank_dc_work);
+ INIT_WORK(&display->irq.vblank_dc_work, intel_display_vblank_dc_work);
}
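
The conversion above follows one pattern throughout: display entry points now take struct intel_display, and anything still owned by the core driver (the irq spinlock, irq_mask) is reached via to_i915(display->drm). A minimal sketch of that pattern, using only helpers that appear in this patch (example_display_irq_helper itself is hypothetical):

static void example_display_irq_helper(struct intel_display *display)
{
	/* derive the core-driver pointer only where it is still needed */
	struct drm_i915_private *i915 = to_i915(display->drm);

	lockdep_assert_held(&i915->irq_lock);

	/* display-side checks and register accesses key off intel_display */
	if (!HAS_DISPLAY(display))
		return;

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
}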
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index d9867cd0a220..f72727768351 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -12,28 +12,27 @@
enum pipe;
struct drm_crtc;
-struct drm_i915_private;
struct intel_display;
-void valleyview_enable_display_irqs(struct drm_i915_private *i915);
-void valleyview_disable_display_irqs(struct drm_i915_private *i915);
+void valleyview_enable_display_irqs(struct intel_display *display);
+void valleyview_disable_display_irqs(struct intel_display *display);
-void ilk_update_display_irq(struct drm_i915_private *i915,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits);
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits);
+void ilk_enable_display_irq(struct intel_display *display, u32 bits);
+void ilk_disable_display_irq(struct intel_display *display, u32 bits);
-void bdw_update_port_irq(struct drm_i915_private *i915, u32 interrupt_mask, u32 enabled_irq_mask);
-void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
-void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
+void bdw_update_port_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask);
+void bdw_enable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
+void bdw_disable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
-void ibx_display_interrupt_update(struct drm_i915_private *i915,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits);
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits);
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits);
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask);
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask);
+void gen8_irq_power_well_post_enable(struct intel_display *display, u8 pipe_mask);
+void gen8_irq_power_well_pre_disable(struct intel_display *display, u8 pipe_mask);
int i8xx_enable_vblank(struct drm_crtc *crtc);
int i915gm_enable_vblank(struct drm_crtc *crtc);
@@ -46,41 +45,41 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void ivb_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void ilk_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void gen8_de_irq_handler(struct drm_i915_private *i915, u32 master_ctl);
-void gen11_display_irq_handler(struct drm_i915_private *i915);
+void ivb_display_irq_handler(struct intel_display *display, u32 de_iir);
+void ilk_display_irq_handler(struct intel_display *display, u32 de_iir);
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl);
+void gen11_display_irq_handler(struct intel_display *display);
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl);
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir);
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl);
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir);
-void i9xx_display_irq_reset(struct drm_i915_private *i915);
-void vlv_display_irq_reset(struct drm_i915_private *i915);
-void gen8_display_irq_reset(struct drm_i915_private *i915);
-void gen11_display_irq_reset(struct drm_i915_private *i915);
+void i9xx_display_irq_reset(struct intel_display *display);
+void vlv_display_irq_reset(struct intel_display *display);
+void gen8_display_irq_reset(struct intel_display *display);
+void gen11_display_irq_reset(struct intel_display *display);
-void vlv_display_irq_postinstall(struct drm_i915_private *i915);
-void ilk_de_irq_postinstall(struct drm_i915_private *i915);
-void gen8_de_irq_postinstall(struct drm_i915_private *i915);
-void gen11_de_irq_postinstall(struct drm_i915_private *i915);
-void dg1_de_irq_postinstall(struct drm_i915_private *i915);
+void vlv_display_irq_postinstall(struct intel_display *display);
+void ilk_de_irq_postinstall(struct intel_display *display);
+void gen8_de_irq_postinstall(struct intel_display *display);
+void gen11_de_irq_postinstall(struct intel_display *display);
+void dg1_de_irq_postinstall(struct intel_display *display);
u32 i915_pipestat_enable_mask(struct intel_display *display, enum pipe pipe);
-void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_enable_asle_pipestat(struct drm_i915_private *i915);
+void i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
+void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
+void i915_enable_asle_pipestat(struct intel_display *display);
-void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i9xx_pipestat_irq_ack(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
+void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]);
void vlv_display_error_irq_ack(struct intel_display *display, u32 *eir, u32 *dpinvgtt);
void vlv_display_error_irq_handler(struct intel_display *display, u32 eir, u32 dpinvgtt);
-void intel_display_irq_init(struct drm_i915_private *i915);
+void intel_display_irq_init(struct intel_display *display);
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable);
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable);
#endif /* __INTEL_DISPLAY_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index f7171e6932dc..c78315eb44fa 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -16,6 +16,7 @@
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
@@ -204,7 +205,7 @@ static bool __intel_display_power_is_enabled(struct intel_display *display,
struct i915_power_well *power_well;
bool is_enabled;
- if (pm_runtime_suspended(display->drm->dev))
+ if (intel_display_rpm_suspended(display))
return false;
is_enabled = true;
@@ -455,7 +456,6 @@ static bool
intel_display_power_grab_async_put_ref(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -473,8 +473,8 @@ intel_display_power_grab_async_put_ref(struct intel_display *display,
goto out_verify;
cancel_async_put_work(power_domains, false);
- intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
- fetch_and_zero(&power_domains->async_put_wakeref));
+ intel_display_rpm_put_raw(display,
+ fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
verify_async_put_domains_state(power_domains);
@@ -512,9 +512,10 @@ __intel_display_power_get_domain(struct intel_display *display,
intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ struct ref_tracker *wakeref;
+
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(display, domain);
@@ -539,12 +540,11 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get_if_in_use(display);
if (!wakeref)
return NULL;
@@ -560,7 +560,7 @@ intel_display_power_get_if_enabled(struct intel_display *display,
mutex_unlock(&power_domains->lock);
if (!is_enabled) {
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
wakeref = NULL;
}
@@ -623,12 +623,10 @@ release_async_put_domains(struct i915_power_domains *power_domains,
struct intel_display *display = container_of(power_domains,
struct intel_display,
power.domains);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get_noresume(rpm);
+ wakeref = intel_display_rpm_get_noresume(display);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
@@ -636,7 +634,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
__intel_display_power_put_domain(display, domain);
}
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static void
@@ -644,11 +642,10 @@ intel_display_power_put_async_work(struct work_struct *work)
{
struct intel_display *display = container_of(work, struct intel_display,
power.domains.async_put_work.work);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
- intel_wakeref_t old_work_wakeref = NULL;
+ struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL;
+
+ new_work_wakeref = intel_display_rpm_get_raw(display);
mutex_lock(&power_domains->lock);
@@ -688,9 +685,9 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (old_work_wakeref)
- intel_runtime_pm_put_raw(rpm, old_work_wakeref);
+ intel_display_rpm_put_raw(display, old_work_wakeref);
if (new_work_wakeref)
- intel_runtime_pm_put_raw(rpm, new_work_wakeref);
+ intel_display_rpm_put_raw(display, new_work_wakeref);
}
/**
@@ -711,10 +708,10 @@ void __intel_display_power_put_async(struct intel_display *display,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+ struct ref_tracker *work_wakeref;
+
+ work_wakeref = intel_display_rpm_get_raw(display);
delay_ms = delay_ms >= 0 ? delay_ms : 100;
@@ -746,9 +743,9 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(rpm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
/**
@@ -765,7 +762,6 @@ out_verify:
*/
void intel_display_power_flush_work(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -786,7 +782,7 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
}
/**
@@ -824,10 +820,8 @@ void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
#else
/**
@@ -846,10 +840,8 @@ void intel_display_power_put(struct intel_display *display,
void intel_display_power_put_unchecked(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_display_rpm_put_unchecked(display);
}
#endif
@@ -1381,18 +1373,18 @@ static void hsw_enable_pc8(struct intel_display *display)
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_LP_PARTITION_LEVEL_DISABLE, 0);
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
hsw_disable_lcpll(display, true, true);
}
static void hsw_disable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
drm_dbg_kms(display->drm, "Disabling package C8+\n");
hsw_restore_lcpll(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/* Many display registers don't survive PC8+ */
#ifdef I915 /* FIXME */
@@ -1979,7 +1971,6 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
*/
void intel_power_domains_driver_remove(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
@@ -1993,7 +1984,7 @@ void intel_power_domains_driver_remove(struct intel_display *display)
intel_power_domains_verify_state(display);
/* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
/**
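
The runtime-PM conversion above is mechanical: each intel_runtime_pm_get()/intel_runtime_pm_put() pair on &i915->runtime_pm becomes its display-local equivalent, with wakerefs now typed as struct ref_tracker *. A hedged sketch of the resulting pairing (example_with_display_power is hypothetical):

static void example_with_display_power(struct intel_display *display)
{
	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);
	/* ... access display hardware here ... */
	intel_display_rpm_put(display, wakeref);
}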
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index e80e1fd611ca..ab1163744bc5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1696,6 +1696,7 @@ I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off,
XE3LPD_PW_C_POWER_DOMAINS,
XE3LPD_PW_D_POWER_DOMAINS,
POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
POWER_DOMAIN_INIT);
static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 8ec87ffd87d2..b9b4359751cc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -186,22 +187,18 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
static void hsw_power_well_post_enable(struct intel_display *display,
u8 irq_pipe_mask, bool has_vga)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (has_vga)
intel_vga_reset_io_mem(display);
if (irq_pipe_mask)
- gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_post_enable(display, irq_pipe_mask);
}
static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (irq_pipe_mask)
- gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_pre_disable(display, irq_pipe_mask);
}
#define ICL_AUX_PW_TO_PHY(pw_idx) \
@@ -752,8 +749,9 @@ void gen9_sanitize_dc_state(struct intel_display *display)
void gen9_set_dc_state(struct intel_display *display, u32 state)
{
struct i915_power_domains *power_domains = &display->power.domains;
- u32 val;
+ bool dc6_was_enabled, enable_dc6;
u32 mask;
+ u32 val;
if (!HAS_DISPLAY(display))
return;
@@ -772,11 +770,19 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
power_domains->dc_state, val & mask);
+ enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
+ dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
+ if (!dc6_was_enabled && enable_dc6)
+ intel_dmc_update_dc6_allowed_count(display, true);
+
val &= ~mask;
val |= state;
gen9_write_dc_state(display, val);
+ if (!enable_dc6 && dc6_was_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
power_domains->dc_state = val & mask;
}
@@ -816,7 +822,8 @@ static void assert_can_enable_dc5(struct intel_display *display)
(intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n");
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+ assert_display_rpm_held(display);
assert_dmc_loaded(display);
}
@@ -1226,7 +1233,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
vlv_init_display_clock_gating(display);
spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
+ valleyview_enable_display_irqs(display);
spin_unlock_irq(&dev_priv->irq_lock);
/*
@@ -1236,8 +1243,8 @@ static void vlv_display_power_well_init(struct intel_display *display)
if (display->power.domains.initializing)
return;
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
/* Re-enable the ADPA, if we have one */
for_each_intel_encoder(display->drm, encoder) {
@@ -1255,7 +1262,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
struct drm_i915_private *dev_priv = to_i915(display->drm);
spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
+ valleyview_disable_display_irqs(display);
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
@@ -1265,7 +1272,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
/* Prevent us from re-enabling polling on accident in late suspend */
if (!display->drm->dev->power.is_suspended)
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
}
static void vlv_display_power_well_enable(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 1f2798404f2c..1dbd3e841df3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -107,14 +107,14 @@ void intel_display_reset_finish(struct intel_display *display, bool test_only)
intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
intel_cx0_pll_power_save_wa(display);
- intel_hpd_init(i915);
+ intel_hpd_init(display);
ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
}
drm_atomic_state_put(state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
new file mode 100644
index 000000000000..48da67dd0136
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "i915_drv.h"
+#include "intel_display_rpm.h"
+#include "intel_runtime_pm.h"
+
+static struct intel_runtime_pm *display_to_rpm(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ return &i915->runtime_pm;
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_runtime_pm_get_raw(display_to_rpm(display));
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put_raw(display_to_rpm(display), wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return intel_runtime_pm_get(display_to_rpm(display));
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return intel_runtime_pm_get_if_in_use(display_to_rpm(display));
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ return intel_runtime_pm_get_noresume(display_to_rpm(display));
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put(display_to_rpm(display), wakeref);
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ intel_runtime_pm_put_unchecked(display_to_rpm(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ return intel_runtime_pm_suspended(display_to_rpm(display));
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ assert_rpm_wakelock_held(display_to_rpm(display));
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ disable_rpm_wakeref_asserts(display_to_rpm(display));
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ enable_rpm_wakeref_asserts(display_to_rpm(display));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.h b/drivers/gpu/drm/i915/display/intel_display_rpm.h
new file mode 100644
index 000000000000..6ef48515f84b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_RPM__
+#define __INTEL_DISPLAY_RPM__
+
+#include <linux/types.h>
+
+struct intel_display;
+struct ref_tracker;
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display);
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref);
+
+#define __with_intel_display_rpm(__display, __wakeref) \
+ for (struct ref_tracker *(__wakeref) = intel_display_rpm_get(__display); (__wakeref); \
+ intel_display_rpm_put((__display), (__wakeref)), (__wakeref) = NULL)
+
+#define with_intel_display_rpm(__display) \
+ __with_intel_display_rpm((__display), __UNIQUE_ID(wakeref))
+
+/* Only for special cases. */
+bool intel_display_rpm_suspended(struct intel_display *display);
+
+void assert_display_rpm_held(struct intel_display *display);
+void intel_display_rpm_assert_block(struct intel_display *display);
+void intel_display_rpm_assert_unblock(struct intel_display *display);
+
+/* Only for display power implementation. */
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display);
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref);
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display);
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display);
+void intel_display_rpm_put_unchecked(struct intel_display *display);
+
+#endif /* __INTEL_DISPLAY_RPM__ */
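
The with_intel_display_rpm() macro above is a for-loop scope guard: it takes a wakeref before the first (and only) iteration, runs the body once, then releases the wakeref and clears the loop variable so the loop terminates — on any exit path. A usage sketch (example_read_hw is a hypothetical caller; intel_de_read() and i915_reg_t are real driver interfaces):

static u32 example_read_hw(struct intel_display *display, i915_reg_t reg)
{
	u32 val = 0;

	with_intel_display_rpm(display)
		val = intel_de_read(display, reg);

	return val;
}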
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 99a6fd2900b9..94468a9d2e0d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -581,7 +581,7 @@ struct dpll {
struct intel_atomic_state {
struct drm_atomic_state base;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct __intel_global_objs_state *global_objs;
int num_global_objs;
@@ -1620,7 +1620,7 @@ struct intel_psr {
bool sink_support;
bool source_support;
bool enabled;
- bool paused;
+ int pause_counter;
enum pipe pipe;
enum transcoder transcoder;
bool active;
@@ -1658,7 +1658,6 @@ struct intel_dp {
int link_rate;
u8 lane_count;
u8 sink_count;
- bool link_trained;
bool needs_modeset_retry;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -1683,6 +1682,7 @@ struct intel_dp {
int common_rates[DP_MAX_SUPPORTED_RATES];
struct {
/* TODO: move the rest of link specific fields to here */
+ bool active;
/* common rate,lane_count configs in bw order */
int num_configs;
#define INTEL_DP_MAX_LANE_COUNT 4
@@ -1739,7 +1739,7 @@ struct intel_dp {
struct {
struct intel_dp_mst_encoder *stream_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mgr;
- int active_links;
+ int active_streams;
} mst;
u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index e5a8022db664..da429c332914 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -3,38 +3,38 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_core.h"
#include "intel_display_wa.h"
-static void gen11_display_wa_apply(struct drm_i915_private *i915)
+static void gen11_display_wa_apply(struct intel_display *display)
{
/* Wa_14010594013 */
- intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
}
-static void xe_d_display_wa_apply(struct drm_i915_private *i915)
+static void xe_d_display_wa_apply(struct intel_display *display)
{
/* Wa_14013723622 */
- intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
+ intel_de_rmw(display, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
}
-static void adlp_display_wa_apply(struct drm_i915_private *i915)
+static void adlp_display_wa_apply(struct intel_display *display)
{
/* Wa_22011091694:adlp */
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
/* Bspec/49189 Initialize Sequence */
- intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
-void intel_display_wa_apply(struct drm_i915_private *i915)
+void intel_display_wa_apply(struct intel_display *display)
{
- if (IS_ALDERLAKE_P(i915))
- adlp_display_wa_apply(i915);
- else if (DISPLAY_VER(i915) == 12)
- xe_d_display_wa_apply(i915);
- else if (DISPLAY_VER(i915) == 11)
- gen11_display_wa_apply(i915);
+ if (display->platform.alderlake_p)
+ adlp_display_wa_apply(display);
+ else if (DISPLAY_VER(display) == 12)
+ xe_d_display_wa_apply(display);
+ else if (DISPLAY_VER(display) == 11)
+ gen11_display_wa_apply(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index be644ab6ae00..babd9d16603d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -8,14 +8,17 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
-void intel_display_wa_apply(struct drm_i915_private *i915);
+void intel_display_wa_apply(struct intel_display *display);
#ifdef I915
-static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; }
+static inline bool intel_display_needs_wa_16023588340(struct intel_display *display)
+{
+ return false;
+}
#else
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915);
+bool intel_display_needs_wa_16023588340(struct intel_display *display);
#endif
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
index 0813fb9b5823..dad7192132ad 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index fa6944e55d95..98f80a6c63e8 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -28,6 +28,8 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
+#include "intel_display_power_well.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_step.h"
@@ -57,6 +59,10 @@ struct intel_dmc {
const char *fw_path;
u32 max_fw_size; /* bytes */
u32 version;
+ struct {
+ u32 dc5_start;
+ u32 count;
+ } dc6_allowed;
struct dmc_fw_info {
u32 mmio_count;
i915_reg_t mmioaddr[20];
@@ -595,7 +601,7 @@ void intel_dmc_load_program(struct intel_display *display)
disable_all_event_handlers(display);
- assert_rpm_wakelock_held(&i915->runtime_pm);
+ assert_display_rpm_held(display);
preempt_disable();
@@ -1232,18 +1238,57 @@ void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct
DMC_VERSION_MINOR(snapshot->version));
}
+void intel_dmc_update_dc6_allowed_count(struct intel_display *display,
+ bool start_tracking)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ u32 dc5_cur_count;
+
+ if (DISPLAY_VER(dmc->display) < 14)
+ return;
+
+ dc5_cur_count = intel_de_read(dmc->display, DG1_DMC_DEBUG_DC5_COUNT);
+
+ if (!start_tracking)
+ dmc->dc6_allowed.count += dc5_cur_count - dmc->dc6_allowed.dc5_start;
+
+ dmc->dc6_allowed.dc5_start = dc5_cur_count;
+}
+
+static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *count)
+{
+ struct i915_power_domains *power_domains = &display->power.domains;
+ struct intel_dmc *dmc = display_to_dmc(display);
+ bool dc6_enabled;
+
+ if (DISPLAY_VER(display) < 14)
+ return false;
+
+ mutex_lock(&power_domains->lock);
+ dc6_enabled = intel_de_read(display, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6;
+ if (dc6_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
+ *count = dmc->dc6_allowed.count;
+ mutex_unlock(&power_domains->lock);
+
+ return true;
+}
+
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_display *display = m->private;
struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc *dmc = display_to_dmc(display);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
+ u32 dc6_allowed_count;
if (!HAS_DMC(display))
return -ENODEV;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
seq_printf(m, "fw loaded: %s\n",
@@ -1287,7 +1332,11 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
}
seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg));
- if (i915_mmio_reg_valid(dc6_reg))
+
+ if (intel_dmc_get_dc6_allowed_count(display, &dc6_allowed_count))
+ seq_printf(m, "DC5 -> DC6 allowed count: %d\n",
+ dc6_allowed_count);
+ else if (i915_mmio_reg_valid(dc6_reg))
seq_printf(m, "DC5 -> DC6 count: %d\n",
intel_de_read(display, dc6_reg));
@@ -1299,7 +1348,7 @@ out:
intel_de_read(display, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL));
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 44cecef98e73..c78426eb4cd5 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -26,6 +26,7 @@ void intel_dmc_debugfs_register(struct intel_display *display);
struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display);
void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p);
+void intel_dmc_update_dc6_allowed_count(struct intel_display *display, bool start_tracking);
void assert_dmc_loaded(struct intel_display *display);
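
The DC6 bookkeeping added above relies on the DMC's DC5 entry counter continuing to tick while DC6 is allowed: the "DC5 -> DC6 allowed" figure is the DC5 delta accumulated over the intervals during which DC6 was enabled. A self-contained model of that arithmetic (names here are illustrative, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

struct dc6_tracker {
	uint32_t dc5_start;	/* DC5 count sampled when DC6 was enabled */
	uint32_t count;		/* DC5 ticks accumulated while DC6 allowed */
};

static void dc6_tracker_update(struct dc6_tracker *t, uint32_t dc5_now,
			       bool start_tracking)
{
	/* on stop: bank the ticks since tracking started */
	if (!start_tracking)
		t->count += dc5_now - t->dc5_start;

	/* on start (or after banking): rebase the window */
	t->dc5_start = dc5_now;
}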
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index a236b5fc7a3d..aeb14a5455fd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -62,6 +62,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -87,7 +88,6 @@
#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
-#include "intel_runtime_pm.h"
#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
@@ -172,10 +172,28 @@ int intel_dp_link_symbol_clock(int rate)
static int max_dprx_rate(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int max_rate;
+
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
- return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+ max_rate = drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+ else
+ max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+
+ /*
+ * Some broken eDP sinks illegally declare support for
+ * HBR3 without TPS4, and are unable to produce a stable
+ * output. Reject HBR3 when TPS4 is not available.
+ */
+ if (max_rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
+ encoder->base.base.id, encoder->base.name);
+ max_rate = 540000;
+ }
- return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+ return max_rate;
}
static int max_dprx_lane_count(struct intel_dp *intel_dp)
@@ -3204,7 +3222,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
intel_dp->needs_modeset_retry = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
@@ -3568,7 +3586,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
if (crtc_state) {
intel_dp_reset_link_params(intel_dp);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
- intel_dp->link_trained = true;
+ intel_dp->link.active = true;
}
}
@@ -4170,6 +4188,9 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
static void
intel_edp_set_sink_rates(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
intel_dp->num_sink_rates = 0;
if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
@@ -4180,10 +4201,7 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
sink_rates, sizeof(sink_rates));
for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
- int val = le16_to_cpu(sink_rates[i]);
-
- if (val == 0)
- break;
+ int rate;
/* Value read multiplied by 200kHz gives the per-lane
* link rate in kHz. The source rates are, however,
* stored in terms of LS_Clk kHz. The full conversion
* back to symbols is
* back to symbols is
* (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
*/
- intel_dp->sink_rates[i] = (val * 200) / 10;
+ rate = le16_to_cpu(sink_rates[i]) * 200 / 10;
+
+ if (rate == 0)
+ break;
+
+ /*
+ * Some broken eDP sinks illegally declare support for
+ * HBR3 without TPS4, and are unable to produce a stable
+ * output. Reject HBR3 when TPS4 is not available.
+ */
+ if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ intel_dp->sink_rates[i] = rate;
}
intel_dp->num_sink_rates = i;
}
@@ -4969,8 +5004,6 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool link_ok = true;
bool reprobe_needed = false;
- drm_WARN_ON_ONCE(display->drm, intel_dp->mst.active_links < 0);
-
for (;;) {
u8 esi[4] = {};
u8 ack[4] = {};
@@ -4985,7 +5018,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
- if (intel_dp->mst.active_links > 0 && link_ok &&
+ if (intel_dp_mst_active_streams(intel_dp) > 0 && link_ok &&
esi[3] & LINK_STATUS_CHANGED) {
if (!intel_dp_mst_link_status(intel_dp))
link_ok = false;
@@ -5046,7 +5079,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
u8 link_status[DP_LINK_STATUS_SIZE];
- if (!intel_dp->link_trained)
+ if (!intel_dp->link.active)
return false;
/*
@@ -6117,7 +6150,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
spin_unlock_irq(&i915->irq_lock);
if (need_work)
- intel_hpd_schedule_detection(i915);
+ intel_hpd_schedule_detection(display);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -6144,13 +6177,12 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
(long_hpd ||
- intel_runtime_pm_suspended(&i915->runtime_pm) ||
+ intel_display_rpm_suspended(display) ||
!intel_pps_have_panel_power_or_vdd(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
@@ -6326,7 +6358,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(dev_priv)) {
+ if (intel_get_lvds_encoder(display)) {
drm_WARN_ON(display->drm,
!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
drm_info(display->drm,
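
Both HBR3 hunks above apply the same sink sanity rule: link rates of 810000 kHz and up (HBR3) are only usable when the sink also reports TPS4, so non-conforming rates are clamped or skipped. Factored out as a sketch (the helper name is hypothetical; drm_dp_tps4_supported() is the real DRM helper used above):

static int example_clamp_rate_for_tps4(struct intel_dp *intel_dp, int rate)
{
	/* HBR3 requires TPS4; fall back to HBR2 on broken sinks */
	if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd))
		return 540000;

	return rate;
}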
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index ec27bbd70bcf..0496061203fb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -247,7 +247,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain;
intel_wakeref_t aux_wakeref;
- intel_wakeref_t pps_wakeref;
+ intel_wakeref_t pps_wakeref = NULL;
int i, ret, recv_bytes;
int try, clock = 0;
u32 status;
@@ -272,7 +272,20 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
aux_domain = intel_aux_power_domain(dig_port);
aux_wakeref = intel_display_power_get(display, aux_domain);
- pps_wakeref = intel_pps_lock(intel_dp);
+
+ /*
+ * The PPS state needs to be locked for:
+ * - eDP on all platforms, since AUX transfers on eDP need VDD power
+ * (either forced or via panel power) which depends on the PPS
+ * state.
+ * - non-eDP on platforms where the PPS is a pipe instance (VLV/CHV),
+ * since changing the PPS state (via a parallel modeset for
+ * instance) may interfere with the AUX transfers on a non-eDP
+ * output as well.
+ */
+ if (intel_dp_is_edp(intel_dp) ||
+ display->platform.valleyview || display->platform.cherryview)
+ pps_wakeref = intel_pps_lock(intel_dp);
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
@@ -430,7 +443,9 @@ out:
if (vdd)
intel_pps_vdd_off_unlocked(intel_dp, false);
- intel_pps_unlock(intel_dp, pps_wakeref);
+ if (pps_wakeref)
+ intel_pps_unlock(intel_dp, pps_wakeref);
+
intel_display_power_put_async(display, aux_domain, aux_wakeref);
out_unlock:
intel_digital_port_unlock(encoder);
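
The condition guarding intel_pps_lock() above reads as a predicate: PPS state matters to AUX transfers on eDP everywhere, and on VLV/CHV for any output, since the PPS there is a per-pipe instance. Expressed as a hypothetical helper for clarity:

static bool example_aux_needs_pps_lock(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	return intel_dp_is_edp(intel_dp) ||
		display->platform.valleyview ||
		display->platform.cherryview;
}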
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2966f5b39392..a479b63112ea 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -56,6 +56,8 @@
lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)
+#define MAX_SEQ_TRAIN_FAILURES 2
+
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
@@ -164,7 +166,7 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
* resetting its internal state when the mode is changed from
* non-transparent to transparent.
*/
- if (intel_dp->link_trained) {
+ if (intel_dp->link.active) {
if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
goto out_reset_lttpr_count;
@@ -711,8 +713,21 @@ void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, b
static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ /*
+ * Currently, we set the MSA ignore bit based on vrr.in_range.
+ * We can't really read that out during driver load since we don't have
+ * the connector information read in yet. So if we do end up doing a
+ * modeset during initial_commit() we'll clear the MSA ignore bit.
+ * GOP likely wouldn't have set this bit so after the initial commit,
+ * if there are no modesets and we enable VRR mode seamlessly
+ * (without a full modeset), the MSA ignore bit might never get set.
+ *
+ * #TODO: Implement readout of vrr.in_range.
+ * We need fastset support for setting the MSA ignore bit in DPCD,
+ * especially on the first real commit when clearing the inherited flag.
+ */
intel_dp_link_training_set_mode(intel_dp,
- crtc_state->port_clock, crtc_state->vrr.flipline);
+ crtc_state->port_clock, crtc_state->vrr.in_range);
}
void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
@@ -1110,7 +1125,10 @@ intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- intel_dp->link_trained = true;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp->link.active = true;
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
@@ -1120,6 +1138,15 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
}
+
+ intel_hpd_unblock(encoder);
+
+ if (!display->hotplug.ignore_long_hpd &&
+ intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) {
+ int delay_ms = intel_dp->link.seq_train_failures ? 0 : 2000;
+
+ intel_encoder_link_check_queue_work(encoder, delay_ms);
+ }
}
static bool
@@ -1602,7 +1629,11 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
* non-transparent mode. During an earlier LTTPR detection this
* could've been prevented by an active link.
*/
- int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+ int lttpr_count;
+
+ intel_hpd_block(encoder);
+
+ lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
if (lttpr_count < 0)
/* Still continue with enabling the port and link training. */
@@ -1620,7 +1651,6 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n");
} else if (passed) {
intel_dp->link.seq_train_failures = 0;
- intel_encoder_link_check_queue_work(encoder, 2000);
return;
}
@@ -1643,10 +1673,8 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
return;
}
- if (intel_dp->link.seq_train_failures < 2) {
- intel_encoder_link_check_queue_work(encoder, 0);
+ if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES)
return;
- }
if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state))
return;
@@ -1693,7 +1721,7 @@ static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
if (err)
return err;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
current_rate = intel_dp->link_rate;
force_rate = intel_dp->link.force_rate;
@@ -1791,7 +1819,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
if (err)
return err;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
current_lane_count = intel_dp->lane_count;
force_lane_count = intel_dp->link.force_lane_count;
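
Taken together, the link-training hunks above rework the retrain flow: HPD for the encoder is blocked for the duration of training and unblocked in intel_dp_stop_link_train(), which now also queues the deferred link check (2s after a clean pass, immediately after a failure) until MAX_SEQ_TRAIN_FAILURES is hit. A simplified sketch of that control flow, ignoring the ignore_long_hpd override (example_train is hypothetical):

static void example_train(struct intel_encoder *encoder, struct intel_dp *intel_dp)
{
	intel_hpd_block(encoder);		/* quiesce HPD while training */

	/* ... link training proper ... */

	intel_dp->link.active = true;
	intel_hpd_unblock(encoder);

	if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES)
		intel_encoder_link_check_queue_work(encoder,
						    intel_dp->link.seq_train_failures ? 0 : 2000);
}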
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 02f95108c637..4c15dcb103aa 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -52,6 +52,7 @@
#include "intel_pfit.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
/*
@@ -104,6 +105,34 @@ static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
return &dig_port->dp;
}
+int intel_dp_mst_active_streams(struct intel_dp *intel_dp)
+{
+ return intel_dp->mst.active_streams;
+}
+
+static bool intel_dp_mst_dec_active_streams(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
+ intel_dp->mst.active_streams, intel_dp->mst.active_streams - 1);
+
+ if (drm_WARN_ON(display->drm, intel_dp->mst.active_streams == 0))
+ return true;
+
+ return --intel_dp->mst.active_streams == 0;
+}
+
+static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
+ intel_dp->mst.active_streams, intel_dp->mst.active_streams + 1);
+
+ return intel_dp->mst.active_streams++ == 0;
+}
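The two new helpers centralize the stream refcount and report the edge transitions the enable/disable paths key off: the increment returns true when it activates the first stream, the decrement when it drops the last. A self-contained sketch of the same pattern, with a stand-in struct rather than the driver's intel_dp state:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the MST stream counter kept in struct intel_dp. */
struct mst_link {
	int active_streams;
};

/* True when this increment activates the first stream. */
static bool inc_active_streams(struct mst_link *link)
{
	return link->active_streams++ == 0;
}

/* True when this decrement deactivates the last stream (or underflows). */
static bool dec_active_streams(struct mst_link *link)
{
	if (link->active_streams == 0) {
		fprintf(stderr, "stream count underflow\n");
		return true;	/* the driver WARNs and claims "last" here */
	}
	return --link->active_streams == 0;
}

int main(void)
{
	struct mst_link link = { 0 };

	printf("first? %d\n", inc_active_streams(&link));	/* 1 */
	printf("first? %d\n", inc_active_streams(&link));	/* 0 */
	printf("last?  %d\n", dec_active_streams(&link));	/* 0 */
	printf("last?  %d\n", dec_active_streams(&link));	/* 1 */
	return 0;
}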
+
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
@@ -710,6 +739,8 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ intel_vrr_compute_config(pipe_config, conn_state);
+
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -997,11 +1028,8 @@ static void mst_stream_disable(struct intel_atomic_state *state,
to_intel_connector(old_conn_state->connector);
enum transcoder trans = old_crtc_state->cpu_transcoder;
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
- if (intel_dp->mst.active_links == 1)
- intel_dp->link_trained = false;
+ if (intel_dp_mst_active_streams(intel_dp) == 1)
+ intel_dp->link.active = false;
intel_hdcp_disable(intel_mst->connector);
@@ -1034,8 +1062,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
bool last_mst_stream;
int i;
- intel_dp->mst.active_links--;
- last_mst_stream = intel_dp->mst.active_links == 0;
+ last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp);
+
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
@@ -1062,6 +1090,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state,
old_payload, new_payload);
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -1104,8 +1134,6 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
primary_encoder->post_disable(state, primary_encoder,
old_crtc_state, NULL);
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
}
static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
@@ -1116,7 +1144,7 @@ static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->mst.active_links == 0 &&
+ if (intel_dp_mst_active_streams(intel_dp) == 0 &&
primary_encoder->post_pll_disable)
primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}
@@ -1129,7 +1157,7 @@ static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->mst.active_links == 0)
+ if (intel_dp_mst_active_streams(intel_dp) == 0)
primary_encoder->pre_pll_enable(state, primary_encoder,
pipe_config, NULL);
else
@@ -1189,13 +1217,11 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
*/
connector->encoder = encoder;
intel_mst->connector = connector;
- first_mst_stream = intel_dp->mst.active_links == 0;
+
+ first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp);
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
if (first_mst_stream)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
@@ -1210,8 +1236,6 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
- intel_dp->mst.active_links++;
-
ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->mst.port));
if (ret < 0)
@@ -1279,7 +1303,7 @@ static void mst_stream_enable(struct intel_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
- bool first_mst_stream = intel_dp->mst.active_links == 1;
+ bool first_mst_stream = intel_dp_mst_active_streams(intel_dp) == 1;
struct intel_crtc *pipe_crtc;
int ret, i, min_hblank;
@@ -1323,14 +1347,13 @@ static void mst_stream_enable(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, pipe_config);
+ intel_vrr_transcoder_enable(pipe_config);
+
intel_ddi_clear_act_sent(encoder, pipe_config);
intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
intel_ddi_wait_for_act_sent(encoder, pipe_config);
drm_dp_check_act_status(&intel_dp->mst.mgr);
@@ -1870,12 +1893,6 @@ mst_stream_encoders_create(struct intel_digital_port *dig_port)
}
int
-intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
-{
- return dig_port->dp.mst.active_links;
-}
-
-int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
struct intel_display *display = to_intel_display(dig_port);
@@ -2101,7 +2118,7 @@ void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
u8 rate_select;
u8 link_bw;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
return;
if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index c1bbfeb02ca9..ab09b487c6bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -18,7 +18,7 @@ struct intel_link_bw_limits;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
-int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
+int intel_dp_mst_active_streams(struct intel_dp *intel_dp);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 08a30e5aafce..0481b1365b85 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -373,14 +373,15 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return i915->display.vbt.lvds_ssc_freq;
+ return display->vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(i915))
return 120000;
- else if (DISPLAY_VER(i915) != 2)
+ else if (DISPLAY_VER(display) != 2)
return 96000;
else
return 48000;
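For reference, the selection ladder in i9xx_pll_refclk() reduces to a simple priority chain. A simplified stand-alone sketch, with the spread-spectrum DPLL bit and PCH check collapsed to plain booleans for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Simplified refclk ladder, in kHz; flags stand in for DPLL/PCH state. */
static int pll_refclk_khz(bool lvds_ssc, int lvds_ssc_freq_khz,
			  bool pch_split, int display_ver)
{
	if (lvds_ssc)
		return lvds_ssc_freq_khz;
	else if (pch_split)
		return 120000;
	else if (display_ver != 2)
		return 96000;
	else
		return 48000;
}

int main(void)
{
	printf("%d kHz\n", pll_refclk_khz(false, 0, false, 4));	/* 96000 */
	printf("%d kHz\n", pll_refclk_khz(false, 0, false, 2));	/* 48000 */
	return 0;
}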
@@ -389,27 +390,27 @@ static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
struct intel_dpll_hw_state *dpll_hw_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
u32 tmp;
/* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
+ if (display->platform.cherryview && crtc->pipe != PIPE_A)
+ tmp = display->state.chv_dpll_md[crtc->pipe];
else
- tmp = intel_de_read(dev_priv,
- DPLL_MD(dev_priv, crtc->pipe));
+ tmp = intel_de_read(display,
+ DPLL_MD(display, crtc->pipe));
hw_state->dpll_md = tmp;
}
- hw_state->dpll = intel_de_read(dev_priv, DPLL(dev_priv, crtc->pipe));
+ hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
- hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
+ if (!display->platform.valleyview && !display->platform.cherryview) {
+ hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
+ hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
} else {
/* Mask out read-only status bits. */
hw_state->dpll &= ~(DPLL_LOCK_VLV |
@@ -421,8 +422,8 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
u32 dpll = hw_state->dpll;
u32 fp;
@@ -436,7 +437,7 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
fp = hw_state->fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev_priv)) {
+ if (display->platform.pineview) {
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
@@ -444,8 +445,8 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (DISPLAY_VER(dev_priv) != 2) {
- if (IS_PINEVIEW(dev_priv))
+ if (DISPLAY_VER(display) != 2) {
+ if (display->platform.pineview)
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
@@ -462,23 +463,23 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
7 : 14;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return;
}
- if (IS_PINEVIEW(dev_priv))
+ if (display->platform.pineview)
port_clock = pnv_calc_dpll_params(refclk, &clock);
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
enum pipe lvds_pipe;
- if (IS_I85X(dev_priv) &&
- intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ if (display->platform.i85x &&
+ intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
lvds_pipe == crtc->pipe) {
- u32 lvds = intel_de_read(dev_priv, LVDS);
+ u32 lvds = intel_de_read(display, LVDS);
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -577,7 +578,7 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
-static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_pll_is_valid(struct intel_display *display,
const struct intel_limit *limit,
const struct dpll *clock)
{
@@ -590,14 +591,14 @@ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
return false;
- if (!IS_PINEVIEW(dev_priv) &&
- !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ if (!display->platform.pineview &&
+ !display->platform.valleyview && !display->platform.cherryview &&
+ !display->platform.broxton && !display->platform.geminilake)
if (clock->m1 <= clock->m2)
return false;
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+ if (!display->platform.valleyview && !display->platform.cherryview &&
+ !display->platform.broxton && !display->platform.geminilake) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
return false;
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -620,7 +621,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -628,7 +629,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
- if (intel_is_dual_link_lvds(dev_priv))
+ if (intel_is_dual_link_lvds(display))
return limit->p2.p2_fast;
else
return limit->p2.p2_slow;
@@ -656,7 +657,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int err = target;
@@ -677,7 +678,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -714,7 +715,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int err = target;
@@ -733,7 +734,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -770,7 +771,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int max_n;
bool found = false;
@@ -794,7 +795,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -817,7 +818,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
* Check if the calculated PLL configuration is more optimal compared to the
* best configuration and error found so far. Return the calculated error.
*/
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
const struct dpll *calculated_clock,
const struct dpll *best_clock,
unsigned int best_error_ppm,
@@ -827,13 +828,13 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* For CHV ignore the error and consider only the P value.
* Prefer a bigger P value based on HW requirements.
*/
- if (IS_CHERRYVIEW(to_i915(dev))) {
+ if (display->platform.cherryview) {
*error_ppm = 0;
return calculated_clock->p > best_clock->p;
}
- if (drm_WARN_ON_ONCE(dev, !target_freq))
+ if (drm_WARN_ON_ONCE(display->drm, !target_freq))
return false;
*error_ppm = div_u64(1000000ULL *
@@ -864,8 +865,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
@@ -889,12 +889,12 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
- if (!vlv_PLL_is_optimal(dev, target,
+ if (!vlv_PLL_is_optimal(display, target,
&clock,
best_clock,
bestppm, &ppm))
@@ -922,8 +922,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct intel_display *display = to_intel_display(crtc_state);
unsigned int best_error_ppm;
struct dpll clock;
u64 m2;
@@ -958,10 +957,10 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+ if (!intel_pll_is_valid(display, limit, &clock))
continue;
- if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
best_error_ppm, &error_ppm))
continue;
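Both the VLV and CHV searches rank candidate divider sets by their deviation from the target dotclock in parts per million (CHV ignores the error and simply prefers the larger P value, per the comment above). A sketch of that metric with an assumed example target:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Deviation of a calculated clock from the target, in parts per million. */
static uint32_t clock_error_ppm(uint32_t target_khz, uint32_t calculated_khz)
{
	uint64_t delta = calculated_khz > target_khz ?
			 calculated_khz - target_khz :
			 target_khz - calculated_khz;

	return (uint32_t)(1000000ULL * delta / target_khz);
}

int main(void)
{
	/* Assumed example: target 148500 kHz, candidate 148350 kHz. */
	printf("%" PRIu32 " ppm\n", clock_error_ppm(148500, 148350)); /* 1010 */
	return 0;
}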
@@ -1005,8 +1004,6 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
@@ -1016,8 +1013,8 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ if (display->platform.i945g || display->platform.i945gm ||
+ display->platform.g33 || display->platform.pineview) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -1030,10 +1027,10 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_G4X(dev_priv)) {
+ if (display->platform.g4x) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- } else if (IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.pineview) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
WARN_ON(reduced_clock->p1 != clock->p1);
} else {
@@ -1057,7 +1054,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
}
WARN_ON(reduced_clock->p2 != clock->p2);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
@@ -1075,11 +1072,10 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- if (IS_PINEVIEW(dev_priv)) {
+ if (display->platform.pineview) {
hw_state->fp0 = pnv_dpll_compute_fp(clock);
hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
} else {
@@ -1089,7 +1085,7 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
hw_state->dpll_md = i965_dpll_md(crtc_state);
}
@@ -1098,8 +1094,6 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
@@ -1129,7 +1123,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
* both DPLLS. The spec says we should disable the DVO 2X clock
* when not needed, but this seems to work fine in practice.
*/
- if (IS_I830(dev_priv) ||
+ if (display->platform.i830 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
@@ -1157,14 +1151,14 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
int ret;
- if (DISPLAY_VER(dev_priv) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1186,13 +1180,13 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (DISPLAY_VER(dev_priv) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1245,8 +1239,8 @@ static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
+ ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(display))))
return 25;
if (crtc_state->sdvo_tv_clock)
@@ -1276,8 +1270,6 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE;
@@ -1311,7 +1303,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
* clear if it's a win or loss power wise. No point in doing
* this on ILK at all since it has a fixed DPLL<->pipe mapping.
*/
- if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+ if (INTEL_NUM_PIPES(display) == 3 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
dpll |= DPLL_SDVO_HIGH_SPEED;
@@ -1362,7 +1354,6 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1375,13 +1366,13 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
- dev_priv->display.vbt.lvds_ssc_freq);
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
+ display->vbt.lvds_ssc_freq);
+ refclk = display->vbt.lvds_ssc_freq;
}
- if (intel_is_dual_link_lvds(dev_priv)) {
+ if (intel_is_dual_link_lvds(display)) {
if (refclk == 100000)
limit = &ilk_limits_dual_lvds_100m;
else
@@ -1539,7 +1530,6 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1547,13 +1537,13 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
- if (intel_is_dual_link_lvds(dev_priv))
+ if (intel_is_dual_link_lvds(display))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
@@ -1589,7 +1579,6 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1597,8 +1586,8 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1628,7 +1617,6 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1636,8 +1624,8 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1669,7 +1657,6 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1677,8 +1664,8 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1751,12 +1738,12 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = {
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -1764,9 +1751,9 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->hw.enable)
return 0;
- ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
+ ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
if (ret) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -1777,23 +1764,23 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
- drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
+ drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
if (!crtc_state->hw.enable || crtc_state->shared_dpll)
return 0;
- if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
+ if (!display->funcs.dpll->crtc_get_shared_dpll)
return 0;
- ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
+ ret = display->funcs.dpll->crtc_get_shared_dpll(state, crtc);
if (ret) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -1802,43 +1789,44 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
}
void
-intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 14)
- dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
- else if (IS_DG2(dev_priv))
- dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
- else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
- dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
+intel_dpll_init_clock_hook(struct intel_display *display)
+{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 14)
+ display->funcs.dpll = &mtl_dpll_funcs;
+ else if (display->platform.dg2)
+ display->funcs.dpll = &dg2_dpll_funcs;
+ else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
+ display->funcs.dpll = &hsw_dpll_funcs;
else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
- else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &chv_dpll_funcs;
- else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
- else if (IS_G4X(dev_priv))
- dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
- else if (IS_PINEVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
- else if (DISPLAY_VER(dev_priv) != 2)
- dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
+ display->funcs.dpll = &ilk_dpll_funcs;
+ else if (display->platform.cherryview)
+ display->funcs.dpll = &chv_dpll_funcs;
+ else if (display->platform.valleyview)
+ display->funcs.dpll = &vlv_dpll_funcs;
+ else if (display->platform.g4x)
+ display->funcs.dpll = &g4x_dpll_funcs;
+ else if (display->platform.pineview)
+ display->funcs.dpll = &pnv_dpll_funcs;
+ else if (DISPLAY_VER(display) != 2)
+ display->funcs.dpll = &i9xx_dpll_funcs;
else
- dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
+ display->funcs.dpll = &i8xx_dpll_funcs;
}
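The converted init hook binds the ops table to intel_display, testing the most capable platforms first so each platform takes the first matching entry and the last branch serves as the fallback. A minimal stand-alone sketch of that vtable-selection shape; the names and version cutoff are illustrative only:

#include <stdio.h>

/* Illustrative ops table bound once at init, like funcs.dpll above. */
struct dpll_funcs {
	const char *name;
	int (*crtc_compute_clock)(int target_khz);
};

static int new_compute(int t) { return t; }	/* dummy hooks */
static int old_compute(int t) { return t; }

static const struct dpll_funcs new_funcs = { "new", new_compute };
static const struct dpll_funcs old_funcs = { "old", old_compute };

struct display {
	int ver;
	const struct dpll_funcs *dpll;
};

static void init_clock_hook(struct display *display)
{
	/* Most capable platform first; the final else is the fallback. */
	if (display->ver >= 14)
		display->dpll = &new_funcs;
	else
		display->dpll = &old_funcs;
}

int main(void)
{
	struct display display = { .ver = 14 };

	init_clock_hook(&display);
	printf("%s -> %d\n", display.dpll->name,
	       display.dpll->crtc_compute_clock(96000));
	return 0;
}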
-static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+static bool i9xx_has_pps(struct intel_display *display)
{
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
return false;
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+ return display->platform.pineview || display->platform.mobile;
}
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
int i;
@@ -1846,27 +1834,27 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- if (i9xx_has_pps(dev_priv))
+ if (i9xx_has_pps(display))
assert_pps_unlocked(display, pipe);
- intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
- intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
+ intel_de_write(display, FP0(pipe), hw_state->fp0);
+ intel_de_write(display, FP1(pipe), hw_state->fp1);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Wait for the clocks to stabilize. */
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
+ if (DISPLAY_VER(display) >= 4) {
+ intel_de_write(display, DPLL_MD(display, pipe),
hw_state->dpll_md);
} else {
/* The pixel multiplier can only be updated once the
@@ -1874,20 +1862,21 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
*
* So write it again.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150); /* wait for warmup */
}
}
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+static void vlv_pllb_recal_opamp(struct intel_display *display,
enum dpio_phy phy, enum dpio_channel ch)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
/*
@@ -1916,6 +1905,7 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct dpll *clock = &crtc_state->dpll;
@@ -1930,7 +1920,7 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, phy, ch);
+ vlv_pllb_recal_opamp(display, phy, ch);
/* Set up Tx target for periodic Rcomp update */
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
@@ -2003,24 +1993,23 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
+ if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
}
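_vlv_enable_pll() writes the DPLL, posting-reads it, waits 150 us, then polls DPLL_LOCK_VLV with a 1 ms timeout. The generic shape of such a bounded poll, sketched in user space with a faked status register; intel_de_wait_for_set() is the driver's real primitive and works against MMIO, not a function call:

#include <stdio.h>
#include <time.h>

/* Faked status register: the "lock" bit comes up after a few reads. */
static unsigned int read_status(void)
{
	static int reads;

	return ++reads >= 3 ? 0x1 : 0x0;
}

/* Bounded poll for a status bit, like intel_de_wait_for_set(). */
static int wait_for_set(unsigned int mask, long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (read_status() & mask)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -1;	/* driver reports "failed to lock" */
	}
}

int main(void)
{
	printf("DPLL lock: %s\n", wait_for_set(0x1, 1) ? "timeout" : "ok");
	return 0;
}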
void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
@@ -2030,7 +2019,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_pps_unlocked(display, pipe);
/* Enable Refclk */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
if (hw_state->dpll & DPLL_VCO_ENABLE) {
@@ -2038,8 +2027,8 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
_vlv_enable_pll(crtc_state);
}
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), hw_state->dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
+ intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
+ intel_de_posting_read(display, DPLL_MD(display, pipe));
}
static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
@@ -2133,6 +2122,7 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -2156,18 +2146,17 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
udelay(1);
/* Enable PLL */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Check PLL is locked */
- if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
+ if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ drm_err(display->drm, "PLL %d failed to lock\n", pipe);
}
void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
@@ -2177,7 +2166,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_pps_unlocked(display, pipe);
/* Enable Refclk and SSC */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~DPLL_VCO_ENABLE);
if (hw_state->dpll & DPLL_VCO_ENABLE) {
@@ -2192,29 +2181,29 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* DPLLCMD is AWOL. Use chicken bits to propagate
* the value from DPLLBMD to either pipe B or C.
*/
- intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(dev_priv, PIPE_B),
+ intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(display, DPLL_MD(display, PIPE_B),
hw_state->dpll_md);
- intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;
+ intel_de_write(display, CBR4_VLV, 0);
+ display->state.chv_dpll_md[pipe] = hw_state->dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
* We should always have it disabled.
*/
- drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) &
+ drm_WARN_ON(display->drm,
+ (intel_de_read(display, DPLL(display, PIPE_B)) &
DPLL_VGA_MODE_DIS) == 0);
} else {
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
+ intel_de_write(display, DPLL_MD(display, pipe),
hw_state->dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL_MD(display, pipe));
}
}
/**
* vlv_force_pll_on - forcibly enable just the PLL
- * @dev_priv: i915 private structure
+ * @display: display device
* @pipe: pipe PLL to enable
* @dpll: PLL configuration
*
@@ -2222,10 +2211,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_crtc_state *crtc_state;
@@ -2238,7 +2226,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
crtc_state->dpll = *dpll;
crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
chv_compute_dpll(crtc_state);
chv_enable_pll(crtc_state);
} else {
@@ -2251,9 +2239,8 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
return 0;
}
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 val;
/* Make sure the pipe isn't still relying on us */
@@ -2268,9 +2255,9 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_posting_read(display, DPLL(display, pipe));
}
-void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void chv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
@@ -2316,18 +2303,18 @@ void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
/**
* vlv_force_pll_off - forcibly disable just the PLL
- * @dev_priv: i915 private structure
+ * @display: display device
* @pipe: pipe PLL to disable
*
* Disable the PLL for @pipe. To be used in cases where we need
* the PLL enabled even when @pipe is not going to be enabled.
*/
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
{
- if (IS_CHERRYVIEW(dev_priv))
- chv_disable_pll(dev_priv, pipe);
+ if (display->platform.cherryview)
+ chv_disable_pll(display, pipe);
else
- vlv_disable_pll(dev_priv, pipe);
+ vlv_disable_pll(display, pipe);
}
/* Only for pre-ILK configs */
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 21d06cbd2ce7..280e90a57c87 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -8,16 +8,15 @@
#include <linux/types.h>
+enum pipe;
struct dpll;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dpll_hw_state;
-enum pipe;
-void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+void intel_dpll_init_clock_hook(struct intel_display *display);
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -29,14 +28,14 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
void chv_compute_dpll(struct intel_crtc_state *crtc_state);
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_force_pll_off(struct intel_display *display, enum pipe pipe);
void chv_enable_pll(const struct intel_crtc_state *crtc_state);
-void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void chv_disable_pll(struct intel_display *display, enum pipe pipe);
void vlv_enable_pll(const struct intel_crtc_state *crtc_state);
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_disable_pll(struct intel_display *display, enum pipe pipe);
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state);
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index c825a507b905..84df41086a89 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -257,7 +257,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int pipe_mask = BIT(crtc->pipe);
+ unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
unsigned int old_mask;
if (drm_WARN_ON(display->drm, !pll))
@@ -303,7 +303,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int pipe_mask = BIT(crtc->pipe);
+ unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
/* PCH only available on ILK+ */
if (DISPLAY_VER(display) < 5)
@@ -715,7 +715,6 @@ static void hsw_ddi_spll_enable(struct intel_display *display,
static void hsw_ddi_wrpll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const enum intel_dpll_id id = pll->info->id;
intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
@@ -726,13 +725,12 @@ static void hsw_ddi_wrpll_disable(struct intel_display *display,
* that depend on it have been shut down.
*/
if (display->dpll.pch_ssc_use & BIT(id))
- intel_init_pch_refclk(i915);
+ intel_init_pch_refclk(display);
}
static void hsw_ddi_spll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum intel_dpll_id id = pll->info->id;
intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
@@ -743,7 +741,7 @@ static void hsw_ddi_spll_disable(struct intel_display *display,
* that depend on it have been shut down.
*/
if (display->dpll.pch_ssc_use & BIT(id))
- intel_init_pch_refclk(i915);
+ intel_init_pch_refclk(display);
}
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
@@ -2606,10 +2604,8 @@ ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
{
return ((display->platform.elkhartlake &&
IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
- display->platform.tigerlake ||
- display->platform.alderlake_s ||
- display->platform.alderlake_p) &&
- display->dpll.ref_clks.nssc == 38400;
+ DISPLAY_VER(display) >= 12) &&
+ display->dpll.ref_clks.nssc == 38400;
}
struct icl_combo_pll_params {
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 0d8ebe38226e..43bd97e4f589 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,6 +9,7 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -127,7 +128,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
struct drm_i915_private *i915 = vm->i915;
struct intel_display *display = &i915->display;
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct i915_vma *vma;
void __iomem *iomem;
struct i915_gem_ww_ctx ww;
@@ -137,7 +138,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
if (i915_gem_object_is_stolen(dpt->obj))
pin_flags |= PIN_MAPPABLE;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
atomic_inc(&display->restore.pending_fb_pin);
for_i915_gem_ww(&ww, err, true) {
@@ -169,7 +170,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
dpt->obj->mm.dirty = true;
atomic_dec(&display->restore.pending_fb_pin);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return err ? ERR_PTR(err) : vma;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 9fc4003d1579..72fe390c5af2 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -11,6 +11,7 @@
#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
#include "intel_dsb_buffer.h"
@@ -142,10 +143,10 @@ static int dsb_vtotal(struct intel_atomic_state *state,
static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- unsigned int latency = skl_watermark_max_latency(i915, 0);
+ unsigned int latency = skl_watermark_max_latency(display, 0);
return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
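The DEWAKE start scanline backs off from the end of the active area by the worst-case watermark latency converted to scanlines. A sketch of that conversion with an assumed 1080p mode (htotal 2200, pixel clock 148500 kHz), rounding up the way intel_usecs_to_scanlines() does:

#include <stdint.h>
#include <stdio.h>

/* usecs -> scanlines, rounding up (cf. intel_usecs_to_scanlines()). */
static unsigned int usecs_to_scanlines(unsigned int usecs,
				       unsigned int crtc_clock_khz,
				       unsigned int htotal)
{
	uint64_t num = (uint64_t)usecs * crtc_clock_khz;
	uint64_t den = 1000ULL * htotal;

	return (unsigned int)((num + den - 1) / den);
}

int main(void)
{
	/* Assumed mode: 1920x1080@60, htotal 2200, clock 148500 kHz. */
	unsigned int vdisplay = 1080, latency_us = 30;
	unsigned int lines = usecs_to_scanlines(latency_us, 148500, 2200);

	printf("dewake start: scanline %u (%u-line latency)\n",
	       vdisplay - lines, lines);	/* scanline 1077, 3 lines */
	return 0;
}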
@@ -795,22 +796,22 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
enum intel_dsb_id dsb_id,
unsigned int max_cmds)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- intel_wakeref_t wakeref;
+ struct intel_display *display = to_intel_display(state);
+ struct ref_tracker *wakeref;
struct intel_dsb *dsb;
unsigned int size;
- if (!HAS_DSB(i915))
+ if (!HAS_DSB(display))
return NULL;
- if (!i915->display.params.enable_dsb)
+ if (!display->params.enable_dsb)
return NULL;
dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb)
goto out;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
@@ -818,7 +819,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
goto out_put_rpm;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
dsb->id = dsb_id;
dsb->crtc = crtc;
@@ -831,10 +832,10 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
return dsb;
out_put_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
kfree(dsb);
out:
- drm_info_once(&i915->drm,
+ drm_info_once(display->drm,
"[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
crtc->base.base.id, crtc->base.name, dsb_id);
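intel_dsb_prepare() brackets the buffer creation with a runtime-PM reference and releases it on both the success and error paths via the usual goto-unwind idiom, falling back to MMIO when setup fails. A self-contained sketch of that shape, with malloc/free standing in for the RPM and buffer calls:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the RPM reference and buffer setup. */
struct rpm_ref { int token; };

static struct rpm_ref *rpm_get(void) { return malloc(sizeof(struct rpm_ref)); }
static void rpm_put(struct rpm_ref *ref) { free(ref); }
static int buffer_create(int size) { return size > 0; }

/* Acquire power, do the work, release on success and error paths alike. */
static void *prepare(int size)
{
	struct rpm_ref *ref;
	void *obj;

	obj = calloc(1, 64);
	if (!obj)
		goto out;

	ref = rpm_get();
	if (!buffer_create(size))
		goto out_put_rpm;

	rpm_put(ref);
	return obj;

out_put_rpm:
	rpm_put(ref);
	free(obj);
out:
	fprintf(stderr, "setup failed, will fall back\n");
	return NULL;
}

int main(void)
{
	void *dsb = prepare(4096);

	printf("prepare: %s\n", dsb ? "ok" : "fallback");
	free(dsb);
	return 0;
}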
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 049443245310..b3c453bf7d5c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -24,9 +24,10 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_dcs_backlight.h"
@@ -162,7 +163,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
if (panel->vbt.backlight.brightness_precision_bits > 8)
@@ -172,7 +173,7 @@ static int dcs_setup_backlight(struct intel_connector *connector,
panel->backlight.level = panel->backlight.max;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using DCS for backlight control\n",
connector->base.base.id, connector->base.name);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 7b2ffd14ae6e..4e92504f5c14 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -102,13 +102,13 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
const u8 *data)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct mipi_dsi_device *dsi_device;
u8 type, flags, seq_port;
u16 len;
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
flags = *data++;
type = *data++;
@@ -120,12 +120,12 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
port = intel_dsi_seq_port_to_port(intel_dsi, seq_port);
- if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port]))
+ if (drm_WARN_ON(display->drm, !intel_dsi->dsi_hosts[port]))
goto out;
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
- drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n",
+ drm_dbg_kms(display->drm, "no dsi device for port %c\n",
port_name(port));
goto out;
}
@@ -150,8 +150,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- drm_dbg(&dev_priv->drm,
- "Generic Read not yet implemented or used\n");
+ drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
mipi_dsi_generic_write(dsi_device, data, len);
@@ -163,15 +162,14 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- drm_dbg(&dev_priv->drm,
- "DCS Read not yet implemented or used\n");
+ drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@@ -182,10 +180,10 @@ out:
static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 delay = *((const u32 *) data);
- drm_dbg_kms(&i915->drm, "%d usecs\n", delay);
+ drm_dbg_kms(display->drm, "%d usecs\n", delay);
usleep_range(delay, delay + 10);
data += 4;
@@ -196,7 +194,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
const char *con_id, u8 idx, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/* XXX: this table is a quick ugly hack. */
static struct gpio_desc *soc_gpio_table[U8_MAX + 1];
struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index];
@@ -204,10 +202,10 @@ static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
if (gpio_desc) {
gpiod_set_value(gpio_desc, value);
} else {
- gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx,
+ gpio_desc = devm_gpiod_get_index(display->drm->dev, con_id, idx,
value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
if (IS_ERR(gpio_desc)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"GPIO index %u request failed (%pe)\n",
gpio_index, gpio_desc);
return;
@@ -242,16 +240,16 @@ static void soc_opaque_gpio_set_value(struct intel_connector *connector,
static void vlv_gpio_set_value(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
if (connector->panel.vbt.dsi.seq_version < 3) {
if (gpio_source == 1) {
- drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
+ drm_dbg_kms(display->drm, "SC gpio not supported\n");
return;
}
if (gpio_source > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
@@ -264,7 +262,7 @@ static void vlv_gpio_set_value(struct intel_connector *connector,
static void chv_gpio_set_value(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (connector->panel.vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
@@ -284,13 +282,13 @@ static void chv_gpio_set_value(struct intel_connector *connector,
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
if (gpio_source != 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
if (gpio_index >= CHV_GPIO_IDX_START_E) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"invalid gpio index %u for GPIO N\n",
gpio_index);
return;
@@ -320,13 +318,13 @@ enum {
MIPI_VIO_EN_2,
};
-static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
+static void icl_native_gpio_set_value(struct intel_display *display,
int gpio, bool value)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int index;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) == 11 && gpio >= MIPI_RESET_2))
return;
switch (gpio) {
@@ -344,7 +342,7 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
* modifications in irq setup and handling.
*/
spin_lock_irq(&dev_priv->irq_lock);
- intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
@@ -354,14 +352,14 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
case MIPI_AVDD_EN_2:
index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), PANEL_POWER_ON,
+ intel_de_rmw(display, PP_CONTROL(display, index), PANEL_POWER_ON,
value ? PANEL_POWER_ON : 0);
break;
case MIPI_BKLT_EN_1:
case MIPI_BKLT_EN_2:
index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), EDP_BLC_ENABLE,
+ intel_de_rmw(display, PP_CONTROL(display, index), EDP_BLC_ENABLE,
value ? EDP_BLC_ENABLE : 0);
break;
case MIPI_AVEE_EN_1:
@@ -389,13 +387,12 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
u8 gpio_source = 0, gpio_index = 0, gpio_number;
bool value;
int size;
- bool native = DISPLAY_VER(i915) >= 11;
+ bool native = DISPLAY_VER(display) >= 11;
if (connector->panel.vbt.dsi.seq_version >= 3) {
size = 3;
@@ -416,16 +413,16 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
gpio_source = (data[1] >> 1) & 3;
}
- drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ drm_dbg_kms(display->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
if (native)
- icl_native_gpio_set_value(i915, gpio_number, value);
- else if (DISPLAY_VER(i915) >= 9)
+ icl_native_gpio_set_value(display, gpio_number, value);
+ else if (DISPLAY_VER(display) >= 9)
bxt_gpio_set_value(connector, gpio_index, value);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_gpio_set_value(connector, gpio_source, gpio_number, value);
- else if (IS_CHERRYVIEW(i915))
+ else if (display->platform.cherryview)
chv_gpio_set_value(connector, gpio_source, gpio_number, value);
return data + size;
@@ -463,8 +460,8 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
const u16 target_addr)
{
- struct drm_device *drm_dev = intel_dsi->base.base.dev;
- struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct acpi_device *adev = ACPI_COMPANION(display->drm->dev);
struct i2c_adapter_lookup lookup = {
.target_addr = target_addr,
.intel_dsi = intel_dsi,
@@ -484,7 +481,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct i2c_adapter *adapter;
struct i2c_msg msg;
int ret;
@@ -494,7 +491,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
u8 payload_size = *(data + 6);
u8 *payload_data;
- drm_dbg_kms(&i915->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
+ drm_dbg_kms(display->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
vbt_i2c_bus_num, target_addr, reg_offset, payload_size, data + 7);
if (intel_dsi->i2c_bus_num < 0) {
@@ -504,7 +501,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
if (!adapter) {
- drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
+ drm_err(display->drm, "Cannot find a valid i2c bus for xfer\n");
goto err_bus;
}
@@ -522,7 +519,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
ret = i2c_transfer(adapter, &msg, 1);
if (ret < 0)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to xfer payload of size (%u) to reg (%u)\n",
payload_size, reg_offset);
@@ -535,16 +532,16 @@ err_bus:
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
- drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n");
+ drm_dbg_kms(display->drm, "Skipping SPI element execution\n");
return data + *(data + 5) + 6;
}
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
#ifdef CONFIG_PMIC_OPREGION
u32 value, mask, reg_address;
u16 i2c_address;
@@ -560,9 +557,9 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
reg_address,
value, mask);
if (ret)
- drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret);
+ drm_err(display->drm, "%s failed, error: %d\n", __func__, ret);
#else
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
#endif
@@ -612,12 +609,12 @@ static const char *sequence_name(enum mipi_seq seq_id)
static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence)))
return;
@@ -625,9 +622,9 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
if (!data)
return;
- drm_WARN_ON(&dev_priv->drm, *data != seq_id);
+ drm_WARN_ON(display->drm, *data != seq_id);
- drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n",
+ drm_dbg_kms(display->drm, "Starting MIPI sequence %d - %s\n",
seq_id, sequence_name(seq_id));
/* Skip Sequence Byte. */
@@ -657,19 +654,19 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
/* Consistency check if we have size. */
if (operation_size && data != next) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Inconsistent operation size\n");
return;
}
} else if (operation_size) {
/* We have size, skip. */
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Unsupported MIPI operation byte %u\n",
operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unsupported MIPI operation byte %u\n",
operation_byte);
return;
@@ -695,54 +692,44 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
-
- drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk);
- drm_dbg_kms(&i915->drm, "Pixel overlap %d\n",
- intel_dsi->pixel_overlap);
- drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
- drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
- drm_dbg_kms(&i915->drm, "Video mode format %s\n",
- intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
- "non-burst with sync pulse" :
- intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
- "non-burst with sync events" :
- intel_dsi->video_mode == BURST_MODE ?
- "burst" : "<unknown>");
- drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
- intel_dsi->burst_mode_ratio);
- drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
- drm_dbg_kms(&i915->drm, "Eot %s\n",
- str_enabled_disabled(intel_dsi->eotp_pkt));
- drm_dbg_kms(&i915->drm, "Clockstop %s\n",
- str_enabled_disabled(!intel_dsi->clock_stop));
- drm_dbg_kms(&i915->drm, "Mode %s\n",
- intel_dsi->operation_mode ? "command" : "video");
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
+ "DSI parameters:");
+
+ drm_printf(&p, "Pclk %d\n", intel_dsi->pclk);
+ drm_printf(&p, "Pixel overlap %d\n", intel_dsi->pixel_overlap);
+ drm_printf(&p, "Lane count %d\n", intel_dsi->lane_count);
+ drm_printf(&p, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ drm_printf(&p, "Video mode format %s\n",
+ intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode == BURST_MODE ?
+ "burst" : "<unknown>");
+ drm_printf(&p, "Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
+ drm_printf(&p, "Reset timer %d\n", intel_dsi->rst_timer_val);
+ drm_printf(&p, "Eot %s\n", str_enabled_disabled(intel_dsi->eotp_pkt));
+ drm_printf(&p, "Clockstop %s\n", str_enabled_disabled(!intel_dsi->clock_stop));
+ drm_printf(&p, "Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
- drm_dbg_kms(&i915->drm,
- "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ drm_printf(&p, "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
- drm_dbg_kms(&i915->drm,
- "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ drm_printf(&p, "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
else
- drm_dbg_kms(&i915->drm, "Dual link: NONE\n");
- drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format);
- drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div);
- drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n",
- intel_dsi->lp_rx_timeout);
- drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n",
- intel_dsi->turn_arnd_val);
- drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count);
- drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n",
- intel_dsi->hs_to_lp_count);
- drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
- drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
- drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n",
- intel_dsi->clk_lp_to_hs_count);
- drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
- intel_dsi->clk_hs_to_lp_count);
- drm_dbg_kms(&i915->drm, "BTA %s\n",
- str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+ drm_printf(&p, "Dual link: NONE\n");
+ drm_printf(&p, "Pixel Format %d\n", intel_dsi->pixel_format);
+ drm_printf(&p, "TLPX %d\n", intel_dsi->escape_clk_div);
+ drm_printf(&p, "LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
+ drm_printf(&p, "Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
+ drm_printf(&p, "Init Count 0x%x\n", intel_dsi->init_count);
+ drm_printf(&p, "HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
+ drm_printf(&p, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ drm_printf(&p, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ drm_printf(&p, "LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
+ drm_printf(&p, "HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
+ drm_printf(&p, "BTA %s\n",
+ str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
@@ -764,8 +751,7 @@ static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps;
@@ -773,7 +759,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u16 burst_mode_ratio;
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
@@ -819,7 +805,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u32 bitrate;
if (mipi_config->target_burst_mode_freq == 0) {
- drm_err(&dev_priv->drm, "Burst mode target is not set\n");
+ drm_err(display->drm, "Burst mode target is not set\n");
return false;
}
@@ -836,7 +822,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
mipi_config->target_burst_mode_freq = bitrate;
if (mipi_config->target_burst_mode_freq < bitrate) {
- drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n");
+ drm_err(display->drm, "Burst mode freq is less than computed\n");
return false;
}
@@ -900,8 +886,7 @@ static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
@@ -911,13 +896,13 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
struct pinctrl *pinctrl;
int ret;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
mipi_config->pwm_blc == PPS_BLC_PMIC) {
gpiod_lookup_table = &pmic_panel_gpio_table;
want_panel_gpio = true;
}
- if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ if (display->platform.valleyview && mipi_config->pwm_blc == PPS_BLC_SOC) {
gpiod_lookup_table = &soc_panel_gpio_table;
want_panel_gpio = true;
want_backlight_gpio = true;
@@ -926,12 +911,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
ARRAY_SIZE(soc_pwm_pinctrl_map));
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to register pwm0 pinmux mapping\n");
- pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+ pinctrl = devm_pinctrl_get_select(display->drm->dev, "soc_pwm0");
if (IS_ERR(pinctrl))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to set pinmux to PWM\n");
}
@@ -939,9 +924,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
gpiod_add_lookup_table(gpiod_lookup_table);
if (want_panel_gpio) {
- intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags);
+ intel_dsi->gpio_panel = devm_gpiod_get(display->drm->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to own gpio for panel control\n");
intel_dsi->gpio_panel = NULL;
}
@@ -949,9 +934,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if (want_backlight_gpio) {
intel_dsi->gpio_backlight =
- devm_gpiod_get(dev->dev, "backlight", flags);
+ devm_gpiod_get(display->drm->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to own gpio for backlight control\n");
intel_dsi->gpio_backlight = NULL;
}
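
The intel_dsi_log_params() rewrite above collapses a long run of drm_dbg_kms() calls into a single drm_printer. A minimal sketch of that idiom, built only from the drm_dbg_printer()/drm_printf() calls visible in the hunk — the function name and value below are illustrative, not part of the patch:

#include <drm/drm_print.h>

/* Illustrative only: batch related debug output under one prefix. */
static void example_log_params(struct drm_device *drm, int lane_count)
{
	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_KMS,
					       "DSI parameters:");

	/* Each drm_printf() emits one KMS debug line carrying the prefix. */
	drm_printf(&p, "Lane count %d\n", lane_count);
}

The printer carries the device and debug category once, so each parameter line shrinks to a single drm_printf() call.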
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index c16fb34b737d..b61520353c92 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -31,10 +31,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
@@ -129,13 +130,13 @@ static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
if (!(tmp & DVO_ENABLE))
return false;
@@ -146,11 +147,11 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
*pipe = REG_FIELD_GET(DVO_PIPE_SEL_MASK, tmp);
@@ -160,13 +161,13 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp, flags = 0;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -186,14 +187,14 @@ static void intel_disable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
- intel_de_rmw(i915, DVO(port), DVO_ENABLE, 0);
- intel_de_posting_read(i915, DVO(port));
+ intel_de_rmw(display, DVO(port), DVO_ENABLE, 0);
+ intel_de_posting_read(display, DVO(port));
}
static void intel_enable_dvo(struct intel_atomic_state *state,
@@ -201,7 +202,7 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
@@ -209,8 +210,8 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
&pipe_config->hw.mode,
&pipe_config->hw.adjusted_mode);
- intel_de_rmw(i915, DVO(port), 0, DVO_ENABLE);
- intel_de_posting_read(i915, DVO(port));
+ intel_de_rmw(display, DVO(port), 0, DVO_ENABLE);
+ intel_de_posting_read(display, DVO(port));
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
@@ -288,7 +289,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port = encoder->port;
@@ -296,7 +297,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
u32 dvo_val;
/* Save the active data order, since I don't know what it should be set to. */
- dvo_val = intel_de_read(i915, DVO(port)) &
+ dvo_val = intel_de_read(display, DVO(port)) &
(DVO_DEDICATED_INT_ENABLE |
DVO_PRESERVE_MASK | DVO_ACT_DATA_ORDER_MASK);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
@@ -309,10 +310,10 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- intel_de_write(i915, DVO_SRCDIM(port),
+ intel_de_write(display, DVO_SRCDIM(port),
DVO_SRCDIM_HORIZONTAL(adjusted_mode->crtc_hdisplay) |
DVO_SRCDIM_VERTICAL(adjusted_mode->crtc_vdisplay));
- intel_de_write(i915, DVO(port), dvo_val);
+ intel_de_write(display, DVO(port), dvo_val);
}
static enum drm_connector_status
@@ -320,10 +321,9 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
{
struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
if (!intel_display_device_enabled(display))
@@ -414,11 +414,10 @@ static int intel_dvo_connector_type(const struct intel_dvo_device *dvo)
}
}
-static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
+static bool intel_dvo_init_dev(struct intel_display *display,
struct intel_dvo *intel_dvo,
const struct intel_dvo_device *dvo)
{
- struct intel_display *display = &dev_priv->display;
struct i2c_adapter *i2c;
u32 dpll[I915_MAX_PIPES];
enum pipe pipe;
@@ -458,15 +457,15 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
* the clock enabled before we attempt to initialize
* the device.
*/
- for_each_pipe(dev_priv, pipe)
- dpll[pipe] = intel_de_rmw(dev_priv, DPLL(dev_priv, pipe), 0,
+ for_each_pipe(display, pipe)
+ dpll[pipe] = intel_de_rmw(display, DPLL(display, pipe), 0,
DPLL_DVO_2X_MODE);
ret = dvo->dev_ops->init(&intel_dvo->dev, i2c);
/* restore the DVO 2x clock state to original */
- for_each_pipe(dev_priv, pipe) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll[pipe]);
+ for_each_pipe(display, pipe) {
+ intel_de_write(display, DPLL(display, pipe), dpll[pipe]);
}
intel_gmbus_force_bit(i2c, false);
@@ -474,14 +473,14 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
return ret;
}
-static bool intel_dvo_probe(struct drm_i915_private *i915,
+static bool intel_dvo_probe(struct intel_display *display,
struct intel_dvo *intel_dvo)
{
int i;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- if (intel_dvo_init_dev(i915, intel_dvo,
+ if (intel_dvo_init_dev(display, intel_dvo,
&intel_dvo_devices[i]))
return true;
}
@@ -489,9 +488,8 @@ static bool intel_dvo_probe(struct drm_i915_private *i915,
return false;
}
-void intel_dvo_init(struct drm_i915_private *i915)
+void intel_dvo_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_connector *connector;
struct intel_encoder *encoder;
struct intel_dvo *intel_dvo;
@@ -518,7 +516,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
encoder->pre_enable = intel_dvo_pre_enable;
connector->get_hw_state = intel_dvo_connector_get_hw_state;
- if (!intel_dvo_probe(i915, intel_dvo)) {
+ if (!intel_dvo_probe(display, intel_dvo)) {
kfree(intel_dvo);
intel_connector_free(connector);
return;
@@ -535,12 +533,12 @@ void intel_dvo_init(struct drm_i915_private *i915)
encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) |
BIT(INTEL_OUTPUT_DVO);
- drm_encoder_init(&i915->drm, &encoder->base,
+ drm_encoder_init(display->drm, &encoder->base,
&intel_dvo_enc_funcs,
intel_dvo_encoder_type(&intel_dvo->dev),
"DVO %c", port_name(encoder->port));
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] detected %s\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] detected %s\n",
encoder->base.base.id, encoder->base.name,
intel_dvo->dev.name);
@@ -549,7 +547,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
DRM_CONNECTOR_POLL_DISCONNECT;
connector->base.polled = connector->polled;
- drm_connector_init_with_ddc(&i915->drm, &connector->base,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_dvo_connector_funcs,
intel_dvo_connector_type(&intel_dvo->dev),
intel_gmbus_get_adapter(display, GMBUS_PIN_DPC));
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.h b/drivers/gpu/drm/i915/display/intel_dvo.h
index bf7a356422ab..83776552fc87 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo.h
@@ -6,12 +6,12 @@
#ifndef __INTEL_DVO_H__
#define __INTEL_DVO_H__
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-void intel_dvo_init(struct drm_i915_private *dev_priv);
+void intel_dvo_init(struct intel_display *display);
#else
-static inline void intel_dvo_init(struct drm_i915_private *dev_priv)
+static inline void intel_dvo_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 30ac9b089ad6..c648ab8a93d7 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -12,6 +12,7 @@
#include "i915_drv.h"
#include "intel_atomic_plane.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -117,7 +118,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
unsigned int pinctl;
@@ -136,7 +137,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
atomic_inc(&display->restore.pending_fb_pin);
@@ -215,7 +216,7 @@ err:
vma = ERR_PTR(ret);
atomic_dec(&display->restore.pending_fb_pin);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return vma;
}
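
The intel_fb_pin_to_ggtt() hunks above swap the GT-side intel_runtime_pm_get()/intel_runtime_pm_put() pair for the display-local wrappers from intel_display_rpm.h, with the wakeref now carried as a struct ref_tracker pointer. A sketch of the resulting pattern — the function itself is hypothetical; the helpers and types are the ones named in the diff:

#include "intel_display_rpm.h"

/* Hypothetical example of the display runtime-PM get/put pattern. */
static void example_hw_access(struct intel_display *display)
{
	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);
	/* ... touch display hardware while it is guaranteed awake ... */
	intel_display_rpm_put(display, wakeref);
}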
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b6978135e8ad..ce5b1e3f1c20 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -55,6 +55,7 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
+#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_display_wa.h"
@@ -519,6 +520,20 @@ static void ilk_fbc_activate(struct intel_fbc *fbc)
DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
+static void fbc_compressor_clkgate_disable_wa(struct intel_fbc *fbc,
+ bool disable)
+{
+ struct intel_display *display = fbc->display;
+
+ if (display->platform.dg2)
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_4, DG2_DPFC_GATING_DIS,
+ disable ? DG2_DPFC_GATING_DIS : 0);
+ else if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPE_CLKGATE_DIS2(fbc->id),
+ MTL_DPFC_GATING_DIS,
+ disable ? MTL_DPFC_GATING_DIS : 0);
+}
+
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
@@ -532,6 +547,10 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+
+ /* wa_18038517565 Enable DPFC clock gating after FBC disable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, false);
}
}
@@ -921,6 +940,10 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+
+ /* wa_18038517565 Disable DPFC clock gating before FBC enable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, true);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
@@ -1436,7 +1459,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (intel_display_needs_wa_16023588340(i915)) {
+ if (intel_display_needs_wa_16023588340(display)) {
plane_state->no_fbc_reason = "Wa_16023588340";
return 0;
}
@@ -1464,14 +1487,15 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
*
- * In Xe3, PSR2 selective fetch and FBC dirty rect feature cannot
- * coexist. So if PSR2 selective fetch is supported then mark that
- * FBC is not supported.
- * TODO: Need a logic to decide between PSR2 and FBC Dirty rect
+	 * TODO: Implement logic to select between PSR2 selective fetch and
+	 * FBC, based on Bspec: 68881, from xe2lpd onwards.
+	 *
+	 * As we still see some strange underruns on those platforms while
+	 * disabling PSR2, keep FBC disabled while selective update is on,
+	 * until the selection logic is implemented.
*/
- if ((IS_DISPLAY_VER(display, 12, 14) || HAS_FBC_DIRTY_RECT(display)) &&
- crtc_state->has_sel_update && !crtc_state->has_panel_replay) {
- plane_state->no_fbc_reason = "PSR2 enabled";
+ if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update) {
+ plane_state->no_fbc_reason = "Selective update enabled";
return 0;
}
@@ -2120,13 +2144,12 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_fbc *fbc = m->private;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_plane *plane;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
drm_modeset_lock_all(display->drm);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&fbc->lock);
if (fbc->active) {
@@ -2151,7 +2174,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
drm_modeset_unlock_all(display->drm);
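
The new fbc_compressor_clkgate_disable_wa() helper above brackets FBC use for wa_18038517565: clock gating is disabled before FBC is enabled and re-enabled after FBC is deactivated. A condensed sketch of that bracketing, assuming the helper and platform checks from the hunks (the wrapper function is illustrative):

/* Hypothetical sketch of the wa_18038517565 bracketing shown above. */
static void example_fbc_wa_bracketing(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;

	/* Disable DPFC clock gating before FBC enable... */
	if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
		fbc_compressor_clkgate_disable_wa(fbc, true);

	/* ... FBC active ... */

	/* ... and re-enable it once FBC has been disabled. */
	if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
		fbc_compressor_clkgate_disable_wa(fbc, false);
}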
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index adc19d5607de..369f46286e95 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -50,6 +50,7 @@
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_bo.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
@@ -213,7 +214,8 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct intel_framebuffer *fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- intel_wakeref_t wakeref;
+ struct intel_display *display = to_intel_display(dev);
+ struct ref_tracker *wakeref;
struct fb_info *info;
struct i915_vma *vma;
unsigned long flags = 0;
@@ -247,7 +249,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
sizes->fb_height = fb->base.height;
}
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -299,14 +301,15 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
out_unpin:
intel_fb_unpin_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 7a8fbff39be0..451cd26024f7 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -136,14 +136,13 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display,
static void ilk_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pipe == PIPE_A) ?
DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
if (enable)
- ilk_enable_display_irq(dev_priv, bit);
+ ilk_enable_display_irq(display, bit);
else
- ilk_disable_display_irq(dev_priv, bit);
+ ilk_disable_display_irq(display, bit);
}
static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
@@ -169,7 +168,6 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable,
bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
if (enable) {
intel_de_write(display, GEN7_ERR_INT,
ERR_INT_FIFO_UNDERRUN(pipe));
@@ -177,9 +175,9 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
if (!ivb_can_enable_err_int(display))
return;
- ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_enable_display_irq(display, DE_ERR_INT_IVB);
} else {
- ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_disable_display_irq(display, DE_ERR_INT_IVB);
if (old &&
intel_de_read(display, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@@ -193,26 +191,23 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
static void bdw_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (enable)
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN);
else
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN);
}
static void ibx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pch_transcoder == PIPE_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
- ibx_enable_display_interrupt(dev_priv, bit);
+ ibx_enable_display_interrupt(display, bit);
else
- ibx_disable_display_interrupt(dev_priv, bit);
+ ibx_disable_display_interrupt(display, bit);
}
static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
@@ -240,8 +235,6 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (enable) {
intel_de_write(display, SERR_INT,
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
@@ -249,9 +242,9 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
if (!cpt_can_enable_serr_int(display))
return;
- ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ ibx_enable_display_interrupt(display, SDE_ERROR_CPT);
} else {
- ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ ibx_disable_display_interrupt(display, SDE_ERROR_CPT);
if (old && intel_de_read(display, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
@@ -477,8 +470,6 @@ void intel_init_fifo_underrun_reporting(struct intel_display *display,
struct intel_crtc *crtc,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
crtc->cpu_fifo_underrun_disabled = !enable;
/*
@@ -490,6 +481,6 @@ void intel_init_fifo_underrun_reporting(struct intel_display *display,
* PCH transcoders B and C would prevent enabling the south
* error interrupt (see cpt_can_enable_serr_int()).
*/
- if (intel_has_pch_trancoder(i915, crtc->pipe))
+ if (intel_has_pch_trancoder(display, crtc->pipe))
crtc->pch_fifo_underrun_disabled = !enable;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 1bf424a822f3..411f17655f89 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -22,7 +22,9 @@
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dp_mst.h"
#include "intel_hdcp.h"
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_regs.h"
@@ -136,7 +138,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
data->k++;
/* if there is only one active stream */
- if (dig_port->dp.mst.active_links <= 1)
+ if (intel_dp_mst_active_streams(&dig_port->dp) <= 1)
break;
}
drm_connector_list_iter_end(&conn_iter);
@@ -334,9 +336,7 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
static bool hdcp_key_loadable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum i915_power_well_id id;
- intel_wakeref_t wakeref;
bool enabled = false;
/*
@@ -349,7 +349,7 @@ static bool hdcp_key_loadable(struct intel_display *display)
id = SKL_DISP_PW_1;
/* PG1 (power well #1) needs to be enabled */
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
enabled = intel_display_power_well_is_enabled(display, id);
/*
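
hdcp_key_loadable() above adopts the scoped with_intel_display_rpm() form, which holds the wakeref only for the statement that follows and drops it automatically. A sketch under that assumption, using only names from the hunk (the example function is hypothetical):

/* Hypothetical sketch of the scoped display-RPM form used above. */
static bool example_pw1_enabled(struct intel_display *display)
{
	bool enabled = false;

	/* Wakeref held only for the body; released on exit from the block. */
	with_intel_display_rpm(display)
		enabled = intel_display_power_well_is_enabled(display,
							      SKL_DISP_PW_1);

	return enabled;
}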
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 33b8d5229db0..f9fa17e1f584 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -64,6 +64,7 @@
#include "intel_panel.h"
#include "intel_pfit.h"
#include "intel_snps_phy.h"
+#include "intel_vrr.h"
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
@@ -2384,6 +2385,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
+ intel_vrr_compute_config(pipe_config, conn_state);
+
intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 00d7b1ccf190..6885e5a09079 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -30,6 +30,7 @@
#include "i915_irq.h"
#include "intel_connector.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -118,7 +119,7 @@ intel_connector_hpd_pin(struct intel_connector *connector)
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
- * @dev_priv: private driver data pointer
+ * @display: display device
* @pin: the pin to gather stats on
* @long_hpd: whether the HPD IRQ was long or short
*
@@ -127,13 +128,13 @@ intel_connector_hpd_pin(struct intel_connector *connector)
* responsible for further action.
*
* The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
- * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
+ * stored in @display->hotplug.hpd_storm_threshold which defaults to
* @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
* short IRQs count as +1. If this threshold is exceeded, it's considered an
* IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
*
* By default, most systems will only count long IRQs towards
- * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
+ * &display->hotplug.hpd_storm_threshold. However, some older systems also
* suffer from short IRQ storms and must also track these. Because short IRQ
* storms are naturally caused by sideband interactions with DP MST devices,
* short IRQ detection is only enabled for systems without DP MST support.
@@ -145,10 +146,10 @@ intel_connector_hpd_pin(struct intel_connector *connector)
*
* Return true if an IRQ storm was detected on @pin.
*/
-static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+static bool intel_hpd_irq_storm_detect(struct intel_display *display,
enum hpd_pin pin, bool long_hpd)
{
- struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ struct intel_hotplug *hpd = &display->hotplug;
unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
const int increment = long_hpd ? 10 : 1;
@@ -156,7 +157,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
bool storm = false;
if (!threshold ||
- (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
+ (!long_hpd && !display->hotplug.hpd_short_storm_enabled))
return false;
if (!time_in_range(jiffies, start, end)) {
@@ -167,11 +168,11 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
hpd->stats[pin].count += increment;
if (hpd->stats[pin].count > threshold) {
hpd->stats[pin].state = HPD_MARK_DISABLED;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Received HPD interrupt on PIN %d - cnt: %d\n",
pin,
hpd->stats[pin].count);
@@ -180,56 +181,65 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
return storm;
}
-static bool detection_work_enabled(struct drm_i915_private *i915)
+static bool detection_work_enabled(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
lockdep_assert_held(&i915->irq_lock);
- return i915->display.hotplug.detection_work_enabled;
+ return display->hotplug.detection_work_enabled;
}
static bool
-mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
lockdep_assert_held(&i915->irq_lock);
- if (!detection_work_enabled(i915))
+ if (!detection_work_enabled(display))
return false;
return mod_delayed_work(i915->unordered_wq, work, delay);
}
static bool
-queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
lockdep_assert_held(&i915->irq_lock);
- if (!detection_work_enabled(i915))
+ if (!detection_work_enabled(display))
return false;
return queue_delayed_work(i915->unordered_wq, work, delay);
}
static bool
-queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
+queue_detection_work(struct intel_display *display, struct work_struct *work)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
lockdep_assert_held(&i915->irq_lock);
- if (!detection_work_enabled(i915))
+ if (!detection_work_enabled(display))
return false;
return queue_work(i915->unordered_wq, work);
}
static void
-intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
+intel_hpd_irq_storm_switch_to_polling(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
bool hpd_disabled = false;
lockdep_assert_held(&dev_priv->irq_lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -238,15 +248,15 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ display->hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
connector->base.name);
- dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
+ display->hotplug.stats[pin].state = HPD_DISABLED;
connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
@@ -255,36 +265,36 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_reschedule(&dev_priv->drm);
- mod_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.reenable_work,
+ drm_kms_helper_poll_reschedule(display->drm);
+ mod_delayed_detection_work(display,
+ &display->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- display.hotplug.reenable_work.work);
+ struct intel_display *display =
+ container_of(work, typeof(*display), hotplug.reenable_work.work);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
enum hpd_pin pin;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
spin_lock_irq(&dev_priv->irq_lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
+ display->hotplug.stats[pin].state != HPD_DISABLED)
continue;
if (connector->base.polled != connector->polled)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Reenabling HPD on connector %s\n",
connector->base.name);
connector->base.polled = connector->polled;
@@ -292,15 +302,15 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
for_each_hpd_pin(pin) {
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
- dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
+ if (display->hotplug.stats[pin].state == HPD_DISABLED)
+ display->hotplug.stats[pin].state = HPD_ENABLED;
}
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static enum intel_hotplug_state
@@ -349,32 +359,75 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
enc_to_dig_port(encoder)->hpd_pulse != NULL;
}
+static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(display->drm, encoder) {
+ if (encoder->hpd_pin != pin)
+ continue;
+
+ if (intel_encoder_has_hpd_pulse(encoder))
+ return true;
+ }
+
+ return false;
+}
+
+static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ lockdep_assert_held(&i915->irq_lock);
+
+ return display->hotplug.stats[pin].blocked_count;
+}
+
+static u32 get_blocked_hpd_pin_mask(struct intel_display *display)
+{
+ enum hpd_pin pin;
+ u32 hpd_pin_mask = 0;
+
+ for_each_hpd_pin(pin) {
+ if (hpd_pin_is_blocked(display, pin))
+ hpd_pin_mask |= BIT(pin);
+ }
+
+ return hpd_pin_mask;
+}
+
static void i915_digport_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
- u32 long_port_mask, short_port_mask;
+ struct intel_display *display =
+ container_of(work, struct intel_display, hotplug.dig_port_work);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ u32 long_hpd_pin_mask, short_hpd_pin_mask;
struct intel_encoder *encoder;
+ u32 blocked_hpd_pin_mask;
u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock);
- long_port_mask = dev_priv->display.hotplug.long_port_mask;
- dev_priv->display.hotplug.long_port_mask = 0;
- short_port_mask = dev_priv->display.hotplug.short_port_mask;
- dev_priv->display.hotplug.short_port_mask = 0;
+
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask;
+ hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask;
+ short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask;
+ hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask;
+
spin_unlock_irq(&dev_priv->irq_lock);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_digital_port *dig_port;
- enum port port = encoder->port;
+ enum hpd_pin pin = encoder->hpd_pin;
bool long_hpd, short_hpd;
enum irqreturn ret;
if (!intel_encoder_has_hpd_pulse(encoder))
continue;
- long_hpd = long_port_mask & BIT(port);
- short_hpd = short_port_mask & BIT(port);
+ long_hpd = long_hpd_pin_mask & BIT(pin);
+ short_hpd = short_hpd_pin_mask & BIT(pin);
if (!long_hpd && !short_hpd)
continue;
@@ -384,15 +437,15 @@ static void i915_digport_work_func(struct work_struct *work)
ret = dig_port->hpd_pulse(dig_port, long_hpd);
if (ret == IRQ_NONE) {
/* fall back to old school hpd */
- old_bits |= BIT(encoder->hpd_pin);
+ old_bits |= BIT(pin);
}
}
if (old_bits) {
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.event_bits |= old_bits;
- queue_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work, 0);
+ display->hotplug.event_bits |= old_bits;
+ queue_delayed_detection_work(display,
+ &display->hotplug.hotplug_work, 0);
spin_unlock_irq(&dev_priv->irq_lock);
}
}
@@ -406,13 +459,18 @@ static void i915_digport_work_func(struct work_struct *work)
*/
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ struct intel_encoder *encoder = &dig_port->base;
spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
- spin_unlock_irq(&i915->irq_lock);
- queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
+ hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin);
+ if (!hpd_pin_is_blocked(display, encoder->hpd_pin))
+ queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
+
+ spin_unlock_irq(&i915->irq_lock);
}
/*
@@ -420,9 +478,10 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
*/
static void i915_hotplug_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.hotplug.hotplug_work.work);
+ struct intel_display *display =
+ container_of(work, struct intel_display, hotplug.hotplug_work.work);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
u32 changed = 0, retry = 0;
@@ -430,30 +489,32 @@ static void i915_hotplug_work_func(struct work_struct *work)
u32 hpd_retry_bits;
struct drm_connector *first_changed_connector = NULL;
int changed_connectors = 0;
+ u32 blocked_hpd_pin_mask;
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
+ mutex_lock(&display->drm->mode_config.mutex);
+ drm_dbg_kms(display->drm, "running encoder hotplug functions\n");
spin_lock_irq(&dev_priv->irq_lock);
- hpd_event_bits = dev_priv->display.hotplug.event_bits;
- dev_priv->display.hotplug.event_bits = 0;
- hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
- dev_priv->display.hotplug.retry_bits = 0;
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask;
+ hotplug->event_bits &= ~hpd_event_bits;
+ hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask;
+ hotplug->retry_bits &= ~hpd_retry_bits;
/* Enable polling for connectors which had HPD IRQ storms */
- intel_hpd_irq_storm_switch_to_polling(dev_priv);
+ intel_hpd_irq_storm_switch_to_polling(display);
spin_unlock_irq(&dev_priv->irq_lock);
	/* Skip calling encoder hotplug handlers if ignore long HPD set */
- if (dev_priv->display.hotplug.ignore_long_hpd) {
- drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ if (display->hotplug.ignore_long_hpd) {
+ drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
+ mutex_unlock(&display->drm->mode_config.mutex);
return;
}
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
u32 hpd_bit;
@@ -472,7 +533,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
else
connector->hotplug_retries++;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Connector %s (pin %i) received hotplug event. (retry %d)\n",
connector->base.name, pin,
connector->hotplug_retries);
@@ -495,12 +556,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
}
}
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (changed_connectors == 1)
drm_kms_helper_connector_hotplug_event(first_changed_connector);
else if (changed_connectors > 0)
- drm_kms_helper_hotplug_event(&dev_priv->drm);
+ drm_kms_helper_hotplug_event(display->drm);
if (first_changed_connector)
drm_connector_put(first_changed_connector);
@@ -509,10 +570,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
retry &= ~changed;
if (retry) {
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.retry_bits |= retry;
+ display->hotplug.retry_bits |= retry;
- mod_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work,
+ mod_delayed_detection_work(display,
+ &display->hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -521,7 +582,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
/**
* intel_hpd_irq_handler - main hotplug irq handler
- * @dev_priv: drm_i915_private
+ * @display: display device
* @pin_mask: a mask of hpd pins that have triggered the irq
* @long_mask: a mask of hpd pins that may be long hpd pulses
*
@@ -535,9 +596,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
* Here, we do hotplug irq storm detection and mitigation, and pass further
* processing to appropriate bottom halves.
*/
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct intel_display *display,
u32 pin_mask, u32 long_mask)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
bool storm_detected = false;
bool queue_dig = false, queue_hp = false;
@@ -556,8 +618,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* as each pin may have up to two encoders (HDMI and DP) and
* only the one of them (DP) will have ->hpd_pulse().
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- enum port port = encoder->port;
+ for_each_intel_encoder(display->drm, encoder) {
bool long_hpd;
pin = encoder->hpd_pin;
@@ -569,18 +630,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"digital hpd on [ENCODER:%d:%s] - %s\n",
encoder->base.base.id, encoder->base.name,
long_hpd ? "long" : "short");
- queue_dig = true;
+
+ if (!hpd_pin_is_blocked(display, pin))
+ queue_dig = true;
if (long_hpd) {
long_hpd_pulse_mask |= BIT(pin);
- dev_priv->display.hotplug.long_port_mask |= BIT(port);
+ display->hotplug.long_hpd_pin_mask |= BIT(pin);
} else {
short_hpd_pulse_mask |= BIT(pin);
- dev_priv->display.hotplug.short_port_mask |= BIT(port);
+ display->hotplug.short_hpd_pin_mask |= BIT(pin);
}
}
@@ -591,20 +654,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(pin) & pin_mask))
continue;
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
+ if (display->hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
* hotplug bits themselves. So only WARN about unexpected
* interrupts on saner platforms.
*/
- drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
+ drm_WARN_ONCE(display->drm, !HAS_GMCH(display),
"Received HPD interrupt on pin %d although disabled\n",
pin);
continue;
}
- if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
+ if (display->hotplug.stats[pin].state != HPD_ENABLED)
continue;
/*
@@ -615,13 +678,15 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
long_hpd = long_hpd_pulse_mask & BIT(pin);
} else {
- dev_priv->display.hotplug.event_bits |= BIT(pin);
+ display->hotplug.event_bits |= BIT(pin);
long_hpd = true;
- queue_hp = true;
+
+ if (!hpd_pin_is_blocked(display, pin))
+ queue_hp = true;
}
- if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
- dev_priv->display.hotplug.event_bits &= ~BIT(pin);
+ if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) {
+ display->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
queue_hp = true;
}
@@ -632,7 +697,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* happens later in our hotplug work.
*/
if (storm_detected)
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
/*
* Our hotplug handler can grab modeset locks (by calling down into the
@@ -641,17 +706,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* deadlock.
*/
if (queue_dig)
- queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
+ queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_detection_work(display,
+ &display->hotplug.hotplug_work, 0);
spin_unlock(&dev_priv->irq_lock);
}
/**
* intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function enables the hotplug support. It requires that interrupts have
* already been enabled with intel_irq_init_hw(). From this point on hotplug and
@@ -663,16 +728,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
*
* Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
*/
-void intel_hpd_init(struct drm_i915_private *dev_priv)
+void intel_hpd_init(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int i;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
for_each_hpd_pin(i) {
- dev_priv->display.hotplug.stats[i].count = 0;
- dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
+ display->hotplug.stats[i].count = 0;
+ display->hotplug.stats[i].state = HPD_ENABLED;
}
/*
@@ -680,23 +746,23 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
* just to make the assert_spin_locked checks happy.
*/
spin_lock_irq(&dev_priv->irq_lock);
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
spin_unlock_irq(&dev_priv->irq_lock);
}
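
A minimal sketch of the conversion pattern this patch applies throughout (the function name here is hypothetical, not part of the series): entry points now take struct intel_display, and drm_i915_private is derived via to_i915(display->drm) only where state that still lives in i915, such as irq_lock, is needed.

static void intel_hpd_example_op(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	spin_lock_irq(&i915->irq_lock);
	/* hotplug state has moved to display->hotplug */
	display->hotplug.retry_bits = 0;
	spin_unlock_irq(&i915->irq_lock);
}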
-static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
+static void i915_hpd_poll_detect_connectors(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
struct intel_connector *first_changed_connector = NULL;
int changed = 0;
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- if (!i915->drm.mode_config.poll_enabled)
+ if (!display->drm->mode_config.poll_enabled)
goto out;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
continue;
@@ -714,7 +780,7 @@ static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
out:
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!changed)
return;
@@ -722,25 +788,24 @@ out:
if (changed == 1)
drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
else
- drm_kms_helper_hotplug_event(&i915->drm);
+ drm_kms_helper_hotplug_event(display->drm);
drm_connector_put(&first_changed_connector->base);
}
static void i915_hpd_poll_init_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.hotplug.poll_init_work);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display =
+ container_of(work, typeof(*display), hotplug.poll_init_work);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
intel_wakeref_t wakeref;
bool enabled;
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
+ enabled = READ_ONCE(display->hotplug.poll_enabled);
/*
* Prevent taking a power reference from this sequence of
* i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
@@ -750,14 +815,14 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (!enabled) {
wakeref = intel_display_power_get(display,
POWER_DOMAIN_DISPLAY_CORE);
- drm_WARN_ON(&dev_priv->drm,
- READ_ONCE(dev_priv->display.hotplug.poll_enabled));
- cancel_work(&dev_priv->display.hotplug.poll_init_work);
+ drm_WARN_ON(display->drm,
+ READ_ONCE(display->hotplug.poll_enabled));
+ cancel_work(&display->hotplug.poll_init_work);
}
spin_lock_irq(&dev_priv->irq_lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -765,7 +830,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (pin == HPD_NONE)
continue;
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ if (display->hotplug.stats[pin].state == HPD_DISABLED)
continue;
connector->base.polled = connector->polled;
@@ -779,16 +844,16 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock);
if (enabled)
- drm_kms_helper_poll_reschedule(&dev_priv->drm);
+ drm_kms_helper_poll_reschedule(display->drm);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/*
* We might have missed any hotplugs that happened while we were
* in the middle of disabling polling.
*/
if (!enabled) {
- i915_hpd_poll_detect_connectors(dev_priv);
+ i915_hpd_poll_detect_connectors(display);
intel_display_power_put(display,
POWER_DOMAIN_DISPLAY_CORE,
@@ -798,7 +863,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
/**
* intel_hpd_poll_enable - enable polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function enables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
@@ -812,15 +877,14 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*
* Also see: intel_hpd_init() and intel_hpd_poll_disable().
*/
-void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_enable(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (!HAS_DISPLAY(dev_priv) ||
- !intel_display_device_enabled(display))
+ if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display))
return;
- WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
+ WRITE_ONCE(display->hotplug.poll_enabled, true);
/*
* We might already be holding dev->mode_config.mutex, so do this in a
@@ -829,14 +893,14 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* this worker anyway
*/
spin_lock_irq(&dev_priv->irq_lock);
- queue_detection_work(dev_priv,
- &dev_priv->display.hotplug.poll_init_work);
+ queue_detection_work(display,
+ &display->hotplug.poll_init_work);
spin_unlock_irq(&dev_priv->irq_lock);
}
/**
* intel_hpd_poll_disable - disable polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function disables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
@@ -853,26 +917,28 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
*
* Also see: intel_hpd_init() and intel_hpd_poll_enable().
*/
-void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_disable(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (!HAS_DISPLAY(display))
return;
- WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
+ WRITE_ONCE(display->hotplug.poll_enabled, false);
spin_lock_irq(&dev_priv->irq_lock);
- queue_detection_work(dev_priv,
- &dev_priv->display.hotplug.poll_init_work);
+ queue_detection_work(display,
+ &display->hotplug.poll_init_work);
spin_unlock_irq(&dev_priv->irq_lock);
}
-void intel_hpd_poll_fini(struct drm_i915_private *i915)
+void intel_hpd_poll_fini(struct intel_display *display)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
/* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
intel_connector_cancel_modeset_retry_work(connector);
intel_hdcp_cancel_works(connector);
@@ -880,141 +946,257 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_hpd_init_early(struct drm_i915_private *i915)
+void intel_hpd_init_early(struct intel_display *display)
{
- INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
+ INIT_DELAYED_WORK(&display->hotplug.hotplug_work,
i915_hotplug_work_func);
- INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
- INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
- INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
+ INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work);
+ INIT_DELAYED_WORK(&display->hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
- i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
/* If we have MST support, we want to avoid doing short HPD IRQ storm
* detection, as short HPD storms will occur as a natural part of
* sideband messaging with MST.
* On older platforms however, IRQ storms can occur with both long and
* short pulses, as seen on some G4x systems.
*/
- i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
+ display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display);
}
-static bool cancel_all_detection_work(struct drm_i915_private *i915)
+static bool cancel_all_detection_work(struct intel_display *display)
{
bool was_pending = false;
- if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
+ if (cancel_delayed_work_sync(&display->hotplug.hotplug_work))
was_pending = true;
- if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
+ if (cancel_work_sync(&display->hotplug.poll_init_work))
was_pending = true;
- if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
+ if (cancel_delayed_work_sync(&display->hotplug.reenable_work))
was_pending = true;
return was_pending;
}
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+void intel_hpd_cancel_work(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (!HAS_DISPLAY(display))
return;
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.long_port_mask = 0;
- dev_priv->display.hotplug.short_port_mask = 0;
- dev_priv->display.hotplug.event_bits = 0;
- dev_priv->display.hotplug.retry_bits = 0;
+ drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));
+
+ display->hotplug.long_hpd_pin_mask = 0;
+ display->hotplug.short_hpd_pin_mask = 0;
+ display->hotplug.event_bits = 0;
+ display->hotplug.retry_bits = 0;
spin_unlock_irq(&dev_priv->irq_lock);
- cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
+ cancel_work_sync(&display->hotplug.dig_port_work);
/*
* All other work triggered by hotplug events should be canceled by
* now.
*/
- if (cancel_all_detection_work(dev_priv))
- drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
+ if (cancel_all_detection_work(display))
+ drm_dbg_kms(display->drm, "Hotplug detection work still active\n");
}
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+static void queue_work_for_missed_irqs(struct intel_display *display)
{
- bool ret = false;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ bool queue_hp_work = false;
+ u32 blocked_hpd_pin_mask;
+ enum hpd_pin pin;
- if (pin == HPD_NONE)
- return false;
+ lockdep_assert_held(&i915->irq_lock);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
- dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
- ret = true;
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask)
+ queue_hp_work = true;
+
+ for_each_hpd_pin(pin) {
+ switch (display->hotplug.stats[pin].state) {
+ case HPD_MARK_DISABLED:
+ queue_hp_work = true;
+ break;
+ case HPD_DISABLED:
+ case HPD_ENABLED:
+ break;
+ default:
+ MISSING_CASE(display->hotplug.stats[pin].state);
+ }
}
- spin_unlock_irq(&dev_priv->irq_lock);
- return ret;
+ if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask)
+ queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
+
+ if (queue_hp_work)
+ queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
}
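
queue_work_for_missed_irqs() filters pending events through get_blocked_hpd_pin_mask(), which this patch introduces in an earlier hunk not shown here. A hedged sketch of what it plausibly computes, assuming it simply gathers the pins with a non-zero blocked_count:

static u32 get_blocked_hpd_pin_mask_sketch(struct intel_display *display)
{
	enum hpd_pin pin;
	u32 mask = 0;

	for_each_hpd_pin(pin)
		if (display->hotplug.stats[pin].blocked_count)
			mask |= BIT(pin);

	return mask;
}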
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
- if (pin == HPD_NONE)
- return;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
- spin_unlock_irq(&dev_priv->irq_lock);
+ lockdep_assert_held(&i915->irq_lock);
+
+ hotplug->stats[pin].blocked_count++;
+
+ return hotplug->stats[pin].blocked_count == 1;
}
-static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
+static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
- bool queue_work = false;
- enum hpd_pin pin;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
lockdep_assert_held(&i915->irq_lock);
- if (i915->display.hotplug.event_bits ||
- i915->display.hotplug.retry_bits)
- queue_work = true;
+ if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0))
+ return true;
- for_each_hpd_pin(pin) {
- switch (i915->display.hotplug.stats[pin].state) {
- case HPD_MARK_DISABLED:
- queue_work = true;
- break;
- case HPD_ENABLED:
- break;
- default:
- MISSING_CASE(i915->display.hotplug.stats[pin].state);
- }
+ hotplug->stats[pin].blocked_count--;
+
+ return hotplug->stats[pin].blocked_count == 0;
+}
+
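
The blocked_count handling above gives the block/unblock API refcount semantics, so nested blocking from independent callers composes. An illustrative (hypothetical) caller:

static void intel_hpd_block_nesting_example(struct intel_encoder *encoder)
{
	intel_hpd_block(encoder);	/* blocked_count 0 -> 1: pin blocked */
	intel_hpd_block(encoder);	/* 1 -> 2: nesting allowed, still blocked */

	intel_hpd_unblock(encoder);	/* 2 -> 1: still blocked */
	intel_hpd_unblock(encoder);	/* 1 -> 0: work for missed IRQs queued */
}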
+/**
+ * intel_hpd_block - Block handling of HPD IRQs on an HPD pin
+ * @encoder: Encoder to block the HPD handling for
+ *
+ * Blocks the handling of HPD IRQs on the HPD pin of @encoder.
+ *
+ * On return:
+ *
+ * - It's guaranteed that the blocked encoders' HPD pulse handler
+ * (via intel_digital_port::hpd_pulse()) is not running.
+ * - The hotplug event handling (via intel_encoder::hotplug()) of an
+ *   HPD IRQ pending at the time this function is called may still be
+ *   running.
+ * - Detection on the encoder's connector (via
+ * drm_connector_helper_funcs::detect_ctx(),
+ * drm_connector_funcs::detect()) remains allowed, for instance as part of
+ * userspace connector probing, or DRM core's connector polling.
+ *
+ * The call must be followed by calling intel_hpd_unblock(), or
+ * intel_hpd_clear_and_unblock().
+ *
+ * Note that the handling of HPD IRQs for another encoder using the same HPD
+ * pin as that of @encoder will also be blocked.
+ */
+void intel_hpd_block(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ bool do_flush = false;
+
+ if (encoder->hpd_pin == HPD_NONE)
+ return;
+
+ spin_lock_irq(&i915->irq_lock);
+
+ if (block_hpd_pin(display, encoder->hpd_pin))
+ do_flush = true;
+
+ spin_unlock_irq(&i915->irq_lock);
+
+ if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin))
+ flush_work(&hotplug->dig_port_work);
+}
+
+/**
+ * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin
+ * @encoder: Encoder to unblock the HPD handling for
+ *
+ * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
+ * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
+ * HPD pin while it was blocked will be handled for @encoder and for any
+ * other encoder sharing the same HPD pin.
+ */
+void intel_hpd_unblock(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (encoder->hpd_pin == HPD_NONE)
+ return;
+
+ spin_lock_irq(&i915->irq_lock);
+
+ if (unblock_hpd_pin(display, encoder->hpd_pin))
+ queue_work_for_missed_irqs(display);
+
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+/**
+ * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin
+ * @encoder: Encoder to unblock the HPD handling for
+ *
+ * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
+ * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the HPD
+ * pin while it was blocked is cleared, so that only new IRQs get handled.
+ */
+void intel_hpd_clear_and_unblock(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ enum hpd_pin pin = encoder->hpd_pin;
+
+ if (pin == HPD_NONE)
+ return;
+
+ spin_lock_irq(&i915->irq_lock);
+
+ if (unblock_hpd_pin(display, pin)) {
+ hotplug->event_bits &= ~BIT(pin);
+ hotplug->retry_bits &= ~BIT(pin);
+ hotplug->short_hpd_pin_mask &= ~BIT(pin);
+ hotplug->long_hpd_pin_mask &= ~BIT(pin);
}
- if (queue_work)
- queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+ spin_unlock_irq(&i915->irq_lock);
}
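
A hedged usage sketch for the pairing the kernel-doc above requires (the caller is hypothetical): block HPD handling on the encoder's pin around a sequence that would otherwise race with the HPD pulse handler, then discard any IRQs recorded in the meantime instead of replaying them.

static void encoder_reprogram_example(struct intel_encoder *encoder)
{
	intel_hpd_block(encoder);	/* hpd_pulse() guaranteed not running */

	/* ... reprogram the port; HPD IRQs raised here are only recorded ... */

	intel_hpd_clear_and_unblock(encoder);	/* drop the recorded IRQs */
}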
-void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
+void intel_hpd_enable_detection_work(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.detection_work_enabled = true;
- queue_work_for_missed_irqs(i915);
+ display->hotplug.detection_work_enabled = true;
+ queue_work_for_missed_irqs(display);
spin_unlock_irq(&i915->irq_lock);
}
-void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
+void intel_hpd_disable_detection_work(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.detection_work_enabled = false;
+ display->hotplug.detection_work_enabled = false;
spin_unlock_irq(&i915->irq_lock);
- cancel_all_detection_work(i915);
+ cancel_all_detection_work(display);
}
-bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
+bool intel_hpd_schedule_detection(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long flags;
bool ret;
spin_lock_irqsave(&i915->irq_lock, flags);
- ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+ ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
spin_unlock_irqrestore(&i915->irq_lock, flags);
return ret;
@@ -1022,15 +1204,16 @@ bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->display.hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
+ flush_work(&display->hotplug.dig_port_work);
+ flush_delayed_work(&display->hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -1044,8 +1227,9 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
unsigned int new_threshold;
int i;
char *newline;
@@ -1070,11 +1254,11 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
return -EINVAL;
if (new_threshold > 0)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting HPD storm detection threshold to %d\n",
new_threshold);
else
- drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+ drm_dbg_kms(display->drm, "Disabling HPD storm detection\n");
spin_lock_irq(&dev_priv->irq_lock);
hotplug->hpd_storm_threshold = new_threshold;
@@ -1084,7 +1268,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
spin_unlock_irq(&dev_priv->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+ flush_delayed_work(&display->hotplug.reenable_work);
return len;
}
@@ -1105,10 +1289,10 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
seq_printf(m, "Enabled: %s\n",
- str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
+ str_yes_no(display->hotplug.hpd_short_storm_enabled));
return 0;
}
@@ -1125,8 +1309,9 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
char *newline;
char tmp[16];
int i;
@@ -1147,11 +1332,11 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
/* Reset to the "default" state for this system */
if (strcmp(tmp, "reset") == 0)
- new_state = !HAS_DP_MST(dev_priv);
+ new_state = !HAS_DP_MST(display);
else if (kstrtobool(tmp, &new_state) != 0)
return -EINVAL;
- drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+ drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n",
new_state ? "En" : "Dis");
spin_lock_irq(&dev_priv->irq_lock);
@@ -1162,7 +1347,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
spin_unlock_irq(&dev_priv->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+ flush_delayed_work(&display->hotplug.reenable_work);
return len;
}
@@ -1176,14 +1361,14 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = {
.write = i915_hpd_short_storm_ctl_write,
};
-void intel_hpd_debugfs_register(struct drm_i915_private *i915)
+void intel_hpd_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
- i915, &i915_hpd_storm_ctl_fops);
+ display, &i915_hpd_storm_ctl_fops);
debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
- i915, &i915_hpd_short_storm_ctl_fops);
+ display, &i915_hpd_short_storm_ctl_fops);
debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
- &i915->display.hotplug.ignore_long_hpd);
+ &display->hotplug.ignore_long_hpd);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index d6986902b054..edc41c9d3d65 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,30 +8,31 @@
#include <linux/types.h>
-struct drm_i915_private;
+enum port;
struct intel_connector;
struct intel_digital_port;
+struct intel_display;
struct intel_encoder;
-enum port;
-void intel_hpd_poll_enable(struct drm_i915_private *dev_priv);
-void intel_hpd_poll_disable(struct drm_i915_private *dev_priv);
-void intel_hpd_poll_fini(struct drm_i915_private *i915);
+void intel_hpd_poll_enable(struct intel_display *display);
+void intel_hpd_poll_disable(struct intel_display *display);
+void intel_hpd_poll_fini(struct intel_display *display);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct intel_display *display,
u32 pin_mask, u32 long_mask);
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port);
-void intel_hpd_init(struct drm_i915_private *dev_priv);
-void intel_hpd_init_early(struct drm_i915_private *i915);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+void intel_hpd_init(struct intel_display *display);
+void intel_hpd_init_early(struct intel_display *display);
+void intel_hpd_cancel_work(struct intel_display *display);
enum hpd_pin intel_hpd_pin_default(enum port port);
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_debugfs_register(struct drm_i915_private *i915);
+void intel_hpd_block(struct intel_encoder *encoder);
+void intel_hpd_unblock(struct intel_encoder *encoder);
+void intel_hpd_clear_and_unblock(struct intel_encoder *encoder);
+void intel_hpd_debugfs_register(struct intel_display *display);
-void intel_hpd_enable_detection_work(struct drm_i915_private *i915);
-void intel_hpd_disable_detection_work(struct drm_i915_private *i915);
-bool intel_hpd_schedule_detection(struct drm_i915_private *i915);
+void intel_hpd_enable_detection_work(struct intel_display *display);
+void intel_hpd_disable_detection_work(struct intel_display *display);
+bool intel_hpd_schedule_detection(struct intel_display *display);
#endif /* __INTEL_HOTPLUG_H__ */
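
A sketch of a call site after this conversion (illustrative only; the resume path shown is hypothetical): callers that used to pass a drm_i915_private pointer now pass the intel_display pointer.

static void display_resume_example(struct intel_display *display)
{
	intel_hpd_init(display);
	intel_hpd_poll_disable(display);

	if (!intel_hpd_schedule_detection(display))
		drm_dbg_kms(display->drm, "detection work not queued\n");
}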
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 2137ac7b882a..2463e61e7802 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -131,30 +131,31 @@ static const u32 hpd_mtp[HPD_NUM_PINS] = {
[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
};
-static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
+static void intel_hpd_init_pins(struct intel_display *display)
{
- struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_hotplug *hpd = &display->hotplug;
- if (HAS_GMCH(dev_priv)) {
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv))
+ if (HAS_GMCH(display)) {
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
hpd->hpd = hpd_status_g4x;
else
hpd->hpd = hpd_status_i915;
return;
}
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
hpd->hpd = hpd_xelpdp;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
hpd->hpd = hpd_gen11;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
hpd->hpd = hpd_bxt;
- else if (DISPLAY_VER(dev_priv) == 9)
+ else if (DISPLAY_VER(display) == 9)
hpd->hpd = NULL; /* no north HPD on SKL */
- else if (DISPLAY_VER(dev_priv) >= 8)
+ else if (DISPLAY_VER(display) >= 8)
hpd->hpd = hpd_bdw;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
hpd->hpd = hpd_ivb;
else
hpd->hpd = hpd_ilk;
@@ -180,19 +181,20 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
}
/* For display hotplug interrupt */
-void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update_locked(struct intel_display *display,
u32 mask, u32 bits)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, bits & ~mask);
+ drm_WARN_ON(display->drm, bits & ~mask);
- intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN(dev_priv), mask,
- bits);
+ intel_de_rmw(display, PORT_HOTPLUG_EN(display), mask, bits);
}
/**
* i915_hotplug_interrupt_update - update hotplug interrupt enable
- * @dev_priv: driver private
+ * @display: display device
* @mask: bits to update
* @bits: bits to enable
* NOTE: the HPD enable bits are modified both inside and outside
@@ -202,12 +204,14 @@ void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
* held already, this function acquires the lock itself. A non-locking
* version is also available.
*/
-void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update(struct intel_display *display,
u32 mask,
u32 bits)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
spin_lock_irq(&dev_priv->irq_lock);
- i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
+ i915_hotplug_interrupt_update_locked(display, mask, bits);
spin_unlock_irq(&dev_priv->irq_lock);
}
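
A sketch of choosing between the two variants (the caller is hypothetical): the plain function takes irq_lock itself, while the _locked flavor is for paths that already hold it.

static void hotplug_enable_example(struct intel_display *display, u32 bit)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	/* process context, lock not held: */
	i915_hotplug_interrupt_update(display, bit, bit);

	/* path that already holds irq_lock: */
	spin_lock_irq(&i915->irq_lock);
	i915_hotplug_interrupt_update_locked(display, bit, 0);
	spin_unlock_irq(&i915->irq_lock);
}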
@@ -339,7 +343,7 @@ static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
*
* Note that the caller is expected to zero out the masks initially.
*/
-static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
+static void intel_get_hpd_pins(struct intel_display *display,
u32 *pin_mask, u32 *long_mask,
u32 hotplug_trigger, u32 dig_hotplug_reg,
const u32 hpd[HPD_NUM_PINS],
@@ -359,37 +363,37 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
*long_mask |= BIT(pin);
}
- drm_dbg(&dev_priv->drm,
- "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
+ drm_dbg_kms(display->drm,
+ "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
-static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_enabled_irqs(struct intel_display *display,
const u32 hpd[HPD_NUM_PINS])
{
struct intel_encoder *encoder;
u32 enabled_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
- if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+ for_each_intel_encoder(display->drm, encoder)
+ if (display->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
}
-static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_hotplug_irqs(struct intel_display *display,
const u32 hpd[HPD_NUM_PINS])
{
struct intel_encoder *encoder;
u32 hotplug_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
hotplug_irqs |= hpd[encoder->hpd_pin];
return hotplug_irqs;
}
-static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
+static u32 intel_hpd_hotplug_mask(struct intel_display *display,
hotplug_mask_func hotplug_mask)
{
enum hpd_pin pin;
@@ -401,25 +405,25 @@ static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
return hotplug;
}
-static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
+static u32 intel_hpd_hotplug_enables(struct intel_display *display,
hotplug_enables_func hotplug_enables)
{
struct intel_encoder *encoder;
u32 hotplug = 0;
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
hotplug |= hotplug_enables(encoder);
return hotplug;
}
-u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
+u32 i9xx_hpd_irq_ack(struct intel_display *display)
{
u32 hotplug_status = 0, hotplug_status_mask;
int i;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
else
@@ -435,53 +439,51 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
* bits can themselves generate a new hotplug interrupt :(
*/
for (i = 0; i < 10; i++) {
- u32 tmp = intel_uncore_read(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv)) & hotplug_status_mask;
+ u32 tmp = intel_de_read(display,
+ PORT_HOTPLUG_STAT(display)) & hotplug_status_mask;
if (tmp == 0)
return hotplug_status;
hotplug_status |= tmp;
- intel_uncore_write(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv),
- hotplug_status);
+ intel_de_write(display, PORT_HOTPLUG_STAT(display),
+ hotplug_status);
}
- drm_WARN_ONCE(&dev_priv->drm, 1,
+ drm_WARN_ONCE(display->drm, 1,
"PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT(dev_priv)));
+ intel_de_read(display, PORT_HOTPLUG_STAT(display)));
return hotplug_status;
}
-void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
+void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status)
{
- struct intel_display *display = &dev_priv->display;
u32 pin_mask = 0, long_mask = 0;
u32 hotplug_trigger;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
else
hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
if (hotplug_trigger) {
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, hotplug_trigger,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
- if ((IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview) &&
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
intel_dp_aux_irq_handler(display);
}
-void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
@@ -491,7 +493,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ dig_hotplug_reg = intel_de_read(display, PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
@@ -500,63 +502,62 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
dig_hotplug_reg &= ~mask;
}
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ intel_de_write(display, PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
pch_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
+void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &i915->display;
enum hpd_pin pin;
u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
u32 pin_mask = 0, long_mask = 0;
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
trigger_aux |= iir & XE2LPD_AUX_DDI_MASK;
for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) {
u32 val;
- if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger))
+ if (!(display->hotplug.hpd[pin] & hotplug_trigger))
continue;
pin_mask |= BIT(pin);
- val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin));
- intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val);
+ val = intel_de_read(display, XELPDP_PORT_HOTPLUG_CTL(pin));
+ intel_de_write(display, XELPDP_PORT_HOTPLUG_CTL(pin), val);
if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT))
long_mask |= BIT(pin);
}
if (pin_mask) {
- drm_dbg(&i915->drm,
- "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, pin_mask, long_mask);
+ drm_dbg_kms(display->drm,
+ "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, pin_mask, long_mask);
- intel_hpd_irq_handler(i915, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
if (trigger_aux)
intel_dp_aux_irq_handler(display);
if (!pin_mask && !trigger_aux)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Unexpected DE HPD/AUX interrupt 0x%08x\n", iir);
}
-void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+void icp_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
u32 pin_mask = 0, long_mask = 0;
@@ -566,36 +567,35 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
/* Locking due to DSI native GPIO sequences */
spin_lock(&dev_priv->irq_lock);
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_DDI, 0, 0);
spin_unlock(&dev_priv->irq_lock);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
icp_ddi_port_hotplug_long_detect);
}
if (tc_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_TC, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
icp_tc_port_hotplug_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_ICP)
intel_gmbus_irq_handler(display);
}
-void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+void spt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -604,61 +604,61 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
spt_port_hotplug_long_detect);
}
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG2, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
spt_port_hotplug2_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_CPT)
intel_gmbus_irq_handler(display);
}
-void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
ilk_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
bxt_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+void gen11_hpd_irq_handler(struct intel_display *display, u32 iir)
{
u32 pin_mask = 0, long_mask = 0;
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
@@ -667,29 +667,29 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tc) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
gen11_port_hotplug_long_detect);
}
if (trigger_tbt) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
gen11_port_hotplug_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
else
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unexpected DE HPD interrupt 0x%08x\n", iir);
}
@@ -735,37 +735,37 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_detection_setup(struct intel_display *display)
{
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the DisplayPort spec).
* The pulse duration bits are reserved on LPT+.
*/
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, ibx_hotplug_mask),
+ intel_hpd_hotplug_enables(display, ibx_hotplug_enables));
}
static void ibx_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- ibx_hotplug_mask(encoder->hpd_pin),
- ibx_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ ibx_hotplug_mask(encoder->hpd_pin),
+ ibx_hotplug_enables(encoder));
}
-static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- ibx_hpd_detection_setup(dev_priv);
+ ibx_hpd_detection_setup(display);
}
static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
@@ -806,36 +806,36 @@ static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
return icp_tc_hotplug_mask(encoder->hpd_pin);
}
-static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_ddi_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
- intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(display, icp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(display, icp_ddi_hotplug_enables));
}
static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI,
- icp_ddi_hotplug_mask(encoder->hpd_pin),
- icp_ddi_hotplug_enables(encoder));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ icp_ddi_hotplug_mask(encoder->hpd_pin),
+ icp_ddi_hotplug_enables(encoder));
}
-static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
- intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(display, icp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(display, icp_tc_hotplug_enables));
}
static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC,
- icp_tc_hotplug_mask(encoder->hpd_pin),
- icp_tc_hotplug_enables(encoder));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ icp_tc_hotplug_mask(encoder->hpd_pin),
+ icp_tc_hotplug_enables(encoder));
}
static void icp_hpd_enable_detection(struct intel_encoder *encoder)
@@ -844,23 +844,23 @@ static void icp_hpd_enable_detection(struct intel_encoder *encoder)
icp_tc_hpd_enable_detection(encoder);
}
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
/*
* We reduce the value to 250us to be able to detect SHPD when an external display
* is connected. This is also required by the DP1.4a spec, Table 3-4.
*/
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- icp_ddi_hpd_detection_setup(dev_priv);
- icp_tc_hpd_detection_setup(dev_priv);
+ icp_ddi_hpd_detection_setup(display);
+ icp_tc_hpd_detection_setup(display);
}
static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin)
@@ -883,59 +883,59 @@ static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
return gen11_hotplug_mask(encoder->hpd_pin);
}
-static void dg1_hpd_invert(struct drm_i915_private *i915)
+static void dg1_hpd_invert(struct intel_display *display)
{
u32 val = (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
INVERT_DDIC_HPD |
INVERT_DDID_HPD);
- intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
+ intel_de_rmw(display, SOUTH_CHICKEN1, 0, val);
}
static void dg1_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- dg1_hpd_invert(i915);
+ dg1_hpd_invert(display);
icp_hpd_enable_detection(encoder);
}
-static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void dg1_hpd_irq_setup(struct intel_display *display)
{
- dg1_hpd_invert(dev_priv);
- icp_hpd_irq_setup(dev_priv);
+ dg1_hpd_invert(display);
+ icp_hpd_irq_setup(display);
}
-static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
- intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
+ intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL,
+ intel_hpd_hotplug_mask(display, gen11_hotplug_mask),
+ intel_hpd_hotplug_enables(display, gen11_hotplug_enables));
}
static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL,
- gen11_hotplug_mask(encoder->hpd_pin),
- gen11_hotplug_enables(encoder));
+ intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
}
-static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_tbt_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
- intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
+ intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL,
+ intel_hpd_hotplug_mask(display, gen11_hotplug_mask),
+ intel_hpd_hotplug_enables(display, gen11_hotplug_enables));
}
static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL,
- gen11_hotplug_mask(encoder->hpd_pin),
- gen11_hotplug_enables(encoder));
+ intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
}
static void gen11_hpd_enable_detection(struct intel_encoder *encoder)
@@ -949,22 +949,23 @@ static void gen11_hpd_enable_detection(struct intel_encoder *encoder)
icp_hpd_enable_detection(encoder);
}
-static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void gen11_hpd_irq_setup(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
- ~enabled_irqs & hotplug_irqs);
- intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
+ intel_de_rmw(display, GEN11_DE_HPD_IMR, hotplug_irqs,
+ ~enabled_irqs & hotplug_irqs);
+ intel_de_posting_read(display, GEN11_DE_HPD_IMR);
- gen11_tc_hpd_detection_setup(dev_priv);
- gen11_tbt_hpd_detection_setup(dev_priv);
+ gen11_tc_hpd_detection_setup(display);
+ gen11_tbt_hpd_detection_setup(display);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_hpd_irq_setup(dev_priv);
+ icp_hpd_irq_setup(display);
}
static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1001,39 +1002,39 @@ static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder)
return mtp_tc_hotplug_mask(encoder->hpd_pin);
}
-static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915)
+static void mtp_ddi_hpd_detection_setup(struct intel_display *display)
{
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
- intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask),
- intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(display, mtp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(display, mtp_ddi_hotplug_enables));
}
static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
mtp_ddi_hotplug_mask(encoder->hpd_pin),
mtp_ddi_hotplug_enables(encoder));
}
-static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915)
+static void mtp_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_de_rmw(i915, SHOTPLUG_CTL_TC,
- intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask),
- intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(display, mtp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(display, mtp_tc_hotplug_enables));
}
static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
mtp_tc_hotplug_mask(encoder->hpd_pin),
mtp_tc_hotplug_enables(encoder));
}
-static void mtp_hpd_invert(struct drm_i915_private *i915)
+static void mtp_hpd_invert(struct intel_display *display)
{
u32 val = (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
@@ -1044,49 +1045,49 @@ static void mtp_hpd_invert(struct drm_i915_private *i915)
INVERT_TC4_HPD |
INVERT_DDID_HPD_MTP |
INVERT_DDIE_HPD);
- intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val);
+ intel_de_rmw(display, SOUTH_CHICKEN1, 0, val);
}
static void mtp_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- mtp_hpd_invert(i915);
+ mtp_hpd_invert(display);
mtp_ddi_hpd_enable_detection(encoder);
mtp_tc_hpd_enable_detection(encoder);
}
-static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
+static void mtp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
/*
 * Use 250us here to align with the DP 1.4a (Table 3-4) spec for what the
 * SHPD_FILTER_CNT value should be.
*/
- intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
- mtp_hpd_invert(i915);
- ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+ mtp_hpd_invert(display);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- mtp_ddi_hpd_detection_setup(i915);
- mtp_tc_hpd_detection_setup(i915);
+ mtp_ddi_hpd_detection_setup(display);
+ mtp_tc_hpd_detection_setup(display);
}
-static void xe2lpd_sde_hpd_irq_setup(struct drm_i915_private *i915)
+static void xe2lpd_sde_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- mtp_ddi_hpd_detection_setup(i915);
- mtp_tc_hpd_detection_setup(i915);
+ mtp_ddi_hpd_detection_setup(display);
+ mtp_tc_hpd_detection_setup(display);
}
static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
@@ -1094,7 +1095,7 @@ static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4;
}
-static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
+static void _xelpdp_pica_hpd_detection_setup(struct intel_display *display,
enum hpd_pin hpd_pin, bool enable)
{
u32 mask = XELPDP_TBT_HOTPLUG_ENABLE |
@@ -1103,18 +1104,18 @@ static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
if (!is_xelpdp_pica_hpd_pin(hpd_pin))
return;
- intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
+ intel_de_rmw(display, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
mask, enable ? mask : 0);
}
static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true);
+ _xelpdp_pica_hpd_detection_setup(display, encoder->hpd_pin, true);
}
-static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
+static void xelpdp_pica_hpd_detection_setup(struct intel_display *display)
{
struct intel_encoder *encoder;
u32 available_pins = 0;
@@ -1122,11 +1123,11 @@ static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS);
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
available_pins |= BIT(encoder->hpd_pin);
for_each_hpd_pin(pin)
- _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin));
+ _xelpdp_pica_hpd_detection_setup(display, pin, available_pins & BIT(pin));
}
static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
@@ -1135,23 +1136,24 @@ static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
mtp_hpd_enable_detection(encoder);
}
-static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
+static void xelpdp_hpd_irq_setup(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs,
+ intel_de_rmw(display, PICAINTERRUPT_IMR, hotplug_irqs,
~enabled_irqs & hotplug_irqs);
- intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR);
+ intel_de_posting_read(display, PICAINTERRUPT_IMR);
- xelpdp_pica_hpd_detection_setup(i915);
+ xelpdp_pica_hpd_detection_setup(display);
if (INTEL_PCH_TYPE(i915) >= PCH_LNL)
- xe2lpd_sde_hpd_irq_setup(i915);
+ xe2lpd_sde_hpd_irq_setup(display);
else if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
- mtp_hpd_irq_setup(i915);
+ mtp_hpd_irq_setup(display);
}
static u32 spt_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1190,57 +1192,61 @@ static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
return spt_hotplug2_mask(encoder->hpd_pin);
}
-static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_detection_setup(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
/* Display WA #1179 WaHardHangonHotPlug: cnp */
if (HAS_PCH_CNP(dev_priv)) {
- intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
- CHASSIS_CLK_REQ_DURATION(0xf));
+ intel_de_rmw(display, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
/* Enable digital hotplug on the PCH */
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, spt_hotplug_mask),
+ intel_hpd_hotplug_enables(display, spt_hotplug_enables));
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2,
- intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask),
- intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG2,
+ intel_hpd_hotplug_mask(display, spt_hotplug2_mask),
+ intel_hpd_hotplug_enables(display, spt_hotplug2_enables));
}
static void spt_hpd_enable_detection(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
/* Display WA #1179 WaHardHangonHotPlug: cnp */
if (HAS_PCH_CNP(i915)) {
- intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1,
- CHASSIS_CLK_REQ_DURATION_MASK,
- CHASSIS_CLK_REQ_DURATION(0xf));
+ intel_de_rmw(display, SOUTH_CHICKEN1,
+ CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- spt_hotplug_mask(encoder->hpd_pin),
- spt_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ spt_hotplug_mask(encoder->hpd_pin),
+ spt_hotplug_enables(encoder));
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2,
- spt_hotplug2_mask(encoder->hpd_pin),
- spt_hotplug2_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG2,
+ spt_hotplug2_mask(encoder->hpd_pin),
+ spt_hotplug2_enables(encoder));
}
-static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_irq_setup(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 hotplug_irqs, enabled_irqs;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- spt_hpd_detection_setup(dev_priv);
+ spt_hpd_detection_setup(display);
}
static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1265,44 +1271,44 @@ static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_detection_setup(struct intel_display *display)
{
/*
* Enable digital hotplug on the CPU, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the DisplayPort spec).
* The pulse duration bits are reserved on HSW+.
*/
- intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
+ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL,
+ intel_hpd_hotplug_mask(display, ilk_hotplug_mask),
+ intel_hpd_hotplug_enables(display, ilk_hotplug_enables));
}
static void ilk_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- ilk_hotplug_mask(encoder->hpd_pin),
- ilk_hotplug_enables(encoder));
+ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL,
+ ilk_hotplug_mask(encoder->hpd_pin),
+ ilk_hotplug_enables(encoder));
ibx_hpd_enable_detection(encoder);
}
-static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- if (DISPLAY_VER(dev_priv) >= 8)
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ if (DISPLAY_VER(display) >= 8)
+ bdw_update_port_irq(display, hotplug_irqs, enabled_irqs);
else
- ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ ilk_update_display_irq(display, hotplug_irqs, enabled_irqs);
- ilk_hpd_detection_setup(dev_priv);
+ ilk_hpd_detection_setup(display);
- ibx_hpd_irq_setup(dev_priv);
+ ibx_hpd_irq_setup(display);
}
static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1344,58 +1350,59 @@ static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, bxt_hotplug_mask),
+ intel_hpd_hotplug_enables(display, bxt_hotplug_enables));
}
static void bxt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- bxt_hotplug_mask(encoder->hpd_pin),
- bxt_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ bxt_hotplug_mask(encoder->hpd_pin),
+ bxt_hotplug_enables(encoder));
}
-static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ bdw_update_port_irq(display, hotplug_irqs, enabled_irqs);
- bxt_hpd_detection_setup(dev_priv);
+ bxt_hpd_detection_setup(display);
}
-static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915)
+static void g45_hpd_peg_band_gap_wa(struct intel_display *display)
{
/*
 * For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be written
 * with 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd);
+ intel_de_rmw(display, PEG_BAND_GAP_DATA, 0xf, 0xd);
}
static void i915_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
- if (IS_G45(i915))
- g45_hpd_peg_band_gap_wa(i915);
+ if (display->platform.g45)
+ g45_hpd_peg_band_gap_wa(display);
/* HPD sense and interrupt enable are one and the same */
- i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
+ i915_hotplug_interrupt_update(display, hotplug_en, hotplug_en);
}
-static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void i915_hpd_irq_setup(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 hotplug_en;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -1404,20 +1411,20 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
* Note HDMI and DP share hotplug bits. Enable bits are the same for all
* generations.
*/
- hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
+ hotplug_en = intel_hpd_enabled_irqs(display, hpd_mask_i915);
/*
* Programming the CRT detection parameters tends to generate a spurious
* hotplug event about three seconds later. So just do it once.
*/
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- if (IS_G45(dev_priv))
- g45_hpd_peg_band_gap_wa(dev_priv);
+ if (display->platform.g45)
+ g45_hpd_peg_band_gap_wa(display);
/* Ignore TV since it's buggy */
- i915_hotplug_interrupt_update_locked(dev_priv,
+ i915_hotplug_interrupt_update_locked(display,
HOTPLUG_INT_EN_MASK |
CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
CRT_HOTPLUG_ACTIVATION_PERIOD_64,
@@ -1426,7 +1433,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
struct intel_hotplug_funcs {
/* Enable HPD sense and interrupts for all present encoders */
- void (*hpd_irq_setup)(struct drm_i915_private *i915);
+ void (*hpd_irq_setup)(struct intel_display *display);
/* Enable HPD sense for a single encoder */
void (*hpd_enable_detection)(struct intel_encoder *encoder);
};
@@ -1449,47 +1456,49 @@ HPD_FUNCS(ilk);
void intel_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (i915->display.funcs.hotplug)
- i915->display.funcs.hotplug->hpd_enable_detection(encoder);
+ if (display->funcs.hotplug)
+ display->funcs.hotplug->hpd_enable_detection(encoder);
}
-void intel_hpd_irq_setup(struct drm_i915_private *i915)
+void intel_hpd_irq_setup(struct intel_display *display)
{
- if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
- !i915->display.irq.vlv_display_irqs_enabled)
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled)
return;
- if (i915->display.funcs.hotplug)
- i915->display.funcs.hotplug->hpd_irq_setup(i915);
+ if (display->funcs.hotplug)
+ display->funcs.hotplug->hpd_irq_setup(display);
}
-void intel_hotplug_irq_init(struct drm_i915_private *i915)
+void intel_hotplug_irq_init(struct intel_display *display)
{
- intel_hpd_init_pins(i915);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ intel_hpd_init_pins(display);
- intel_hpd_init_early(i915);
+ intel_hpd_init_early(display);
- if (HAS_GMCH(i915)) {
- if (I915_HAS_HOTPLUG(i915))
- i915->display.funcs.hotplug = &i915_hpd_funcs;
+ if (HAS_GMCH(display)) {
+ if (HAS_HOTPLUG(display))
+ display->funcs.hotplug = &i915_hpd_funcs;
} else {
if (HAS_PCH_DG2(i915))
- i915->display.funcs.hotplug = &icp_hpd_funcs;
+ display->funcs.hotplug = &icp_hpd_funcs;
else if (HAS_PCH_DG1(i915))
- i915->display.funcs.hotplug = &dg1_hpd_funcs;
- else if (DISPLAY_VER(i915) >= 14)
- i915->display.funcs.hotplug = &xelpdp_hpd_funcs;
- else if (DISPLAY_VER(i915) >= 11)
- i915->display.funcs.hotplug = &gen11_hpd_funcs;
- else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->display.funcs.hotplug = &bxt_hpd_funcs;
+ display->funcs.hotplug = &dg1_hpd_funcs;
+ else if (DISPLAY_VER(display) >= 14)
+ display->funcs.hotplug = &xelpdp_hpd_funcs;
+ else if (DISPLAY_VER(display) >= 11)
+ display->funcs.hotplug = &gen11_hpd_funcs;
+ else if (display->platform.geminilake || display->platform.broxton)
+ display->funcs.hotplug = &bxt_hpd_funcs;
else if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
- i915->display.funcs.hotplug = &icp_hpd_funcs;
+ display->funcs.hotplug = &icp_hpd_funcs;
else if (INTEL_PCH_TYPE(i915) >= PCH_SPT)
- i915->display.funcs.hotplug = &spt_hpd_funcs;
+ display->funcs.hotplug = &spt_hpd_funcs;
else
- i915->display.funcs.hotplug = &ilk_hpd_funcs;
+ display->funcs.hotplug = &ilk_hpd_funcs;
}
}
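
The hunks above all follow one conversion recipe: display helpers now take struct intel_display, the intel_uncore_*() MMIO accessors become intel_de_*(), and struct drm_i915_private is rederived via to_i915(display->drm) only where a PCH-type check still needs it. A minimal sketch of that shape, reusing names from the hunks (the function name itself is hypothetical):

/* Hypothetical example of the conversion recipe used throughout this file */
static void example_hpd_irq_setup(struct intel_display *display)
{
	/* i915 pointer rederived only for the remaining PCH-type check */
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);

	/* intel_de_*() display accessors replace intel_uncore_*() */
	intel_de_rmw(display, GEN11_DE_HPD_IMR, hotplug_irqs,
		     ~enabled_irqs & hotplug_irqs);
	intel_de_posting_read(display, GEN11_DE_HPD_IMR);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(display);
}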
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
index e4db752df096..9063bb02a2e9 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
struct intel_encoder;
-u32 i9xx_hpd_irq_ack(struct drm_i915_private *i915);
+u32 i9xx_hpd_irq_ack(struct intel_display *display);
-void i9xx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_status);
-void ibx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void ilk_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void gen11_hpd_irq_handler(struct drm_i915_private *i915, u32 iir);
-void bxt_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir);
-void icp_irq_handler(struct drm_i915_private *i915, u32 pch_iir);
-void spt_irq_handler(struct drm_i915_private *i915, u32 pch_iir);
+void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status);
+void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void gen11_hpd_irq_handler(struct intel_display *display, u32 iir);
+void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir);
+void icp_irq_handler(struct intel_display *display, u32 pch_iir);
+void spt_irq_handler(struct intel_display *display, u32 pch_iir);
-void i915_hotplug_interrupt_update_locked(struct drm_i915_private *i915,
+void i915_hotplug_interrupt_update_locked(struct intel_display *display,
u32 mask, u32 bits);
-void i915_hotplug_interrupt_update(struct drm_i915_private *i915,
+void i915_hotplug_interrupt_update(struct intel_display *display,
u32 mask, u32 bits);
void intel_hpd_enable_detection(struct intel_encoder *encoder);
-void intel_hpd_irq_setup(struct drm_i915_private *i915);
+void intel_hpd_irq_setup(struct intel_display *display);
-void intel_hotplug_irq_init(struct drm_i915_private *i915);
+void intel_hotplug_irq_init(struct intel_display *display);
#endif /* __INTEL_HOTPLUG_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index fb6b84f6a81d..dc454420c134 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
index 86cc03a4413c..aad52d0d83e1 100644
--- a/drivers/gpu/drm/i915/display/intel_load_detect.c
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include "intel_atomic.h"
#include "intel_crtc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 63c1afa30b05..f94b7eeae20f 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -27,6 +27,7 @@
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include "i915_reg.h"
#include "i915_utils.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 19f52d1659fa..89d26913e253 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -84,12 +84,13 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base);
}
-bool intel_lvds_port_enabled(struct drm_i915_private *i915,
+bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val;
- val = intel_de_read(i915, lvds_reg);
+ val = intel_de_read(display, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(i915))
@@ -104,7 +105,6 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -113,7 +113,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
if (!wakeref)
return false;
- ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
+ ret = intel_lvds_port_enabled(display, lvds_encoder->reg, pipe);
intel_display_power_put(display, encoder->power_domain, wakeref);
@@ -123,13 +123,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
u32 tmp, flags = 0;
crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS);
- tmp = intel_de_read(dev_priv, lvds_encoder->reg);
+ tmp = intel_de_read(display, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
@@ -141,13 +141,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
crtc_state->hw.adjusted_mode.flags |= flags;
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
crtc_state->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (DISPLAY_VER(dev_priv) < 4) {
- tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv));
+ if (DISPLAY_VER(display) < 4) {
+ tmp = intel_de_read(display, PFIT_CONTROL(display));
crtc_state->gmch_pfit.control |= tmp & PFIT_PANEL_8TO6_DITHER_ENABLE;
}
@@ -155,24 +155,24 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
-static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_get_hw_state(struct intel_display *display,
struct intel_lvds_pps *pps)
{
u32 val;
- pps->powerdown_on_reset = intel_de_read(dev_priv,
- PP_CONTROL(dev_priv, 0)) & PANEL_POWER_RESET;
+ pps->powerdown_on_reset = intel_de_read(display,
+ PP_CONTROL(display, 0)) & PANEL_POWER_RESET;
- val = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0));
+ val = intel_de_read(display, PP_ON_DELAYS(display, 0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
pps->delays.power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
pps->delays.backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
- val = intel_de_read(dev_priv, PP_OFF_DELAYS(dev_priv, 0));
+ val = intel_de_read(display, PP_OFF_DELAYS(display, 0));
pps->delays.power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
pps->delays.backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
- val = intel_de_read(dev_priv, PP_DIVISOR(dev_priv, 0));
+ val = intel_de_read(display, PP_DIVISOR(display, 0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
@@ -185,12 +185,12 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->delays.power_cycle = val * 1000;
- if (DISPLAY_VER(dev_priv) < 5 &&
+ if (DISPLAY_VER(display) < 5 &&
pps->delays.power_up == 0 &&
pps->delays.backlight_on == 0 &&
pps->delays.power_down == 0 &&
pps->delays.backlight_off == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Panel power timings uninitialized, "
"setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
@@ -201,7 +201,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->delays.backlight_off = 200 * 10;
}
- drm_dbg(&dev_priv->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
+ drm_dbg(display->drm, "LVDS PPS: power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
"divider %d port %d powerdown_on_reset %d\n",
pps->delays.power_up, pps->delays.power_down,
pps->delays.power_cycle, pps->delays.backlight_on,
@@ -209,28 +209,28 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->port, pps->powerdown_on_reset);
}
-static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_init_hw(struct intel_display *display,
struct intel_lvds_pps *pps)
{
u32 val;
- val = intel_de_read(dev_priv, PP_CONTROL(dev_priv, 0));
- drm_WARN_ON(&dev_priv->drm,
+ val = intel_de_read(display, PP_CONTROL(display, 0));
+ drm_WARN_ON(display->drm,
(val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
if (pps->powerdown_on_reset)
val |= PANEL_POWER_RESET;
- intel_de_write(dev_priv, PP_CONTROL(dev_priv, 0), val);
+ intel_de_write(display, PP_CONTROL(display, 0), val);
- intel_de_write(dev_priv, PP_ON_DELAYS(dev_priv, 0),
+ intel_de_write(display, PP_ON_DELAYS(display, 0),
REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->delays.power_up) |
REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->delays.backlight_on));
- intel_de_write(dev_priv, PP_OFF_DELAYS(dev_priv, 0),
+ intel_de_write(display, PP_OFF_DELAYS(display, 0),
REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->delays.power_down) |
REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->delays.backlight_off));
- intel_de_write(dev_priv, PP_DIVISOR(dev_priv, 0),
+ intel_de_write(display, PP_DIVISOR(display, 0),
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
DIV_ROUND_UP(pps->delays.power_cycle, 1000) + 1));
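
A quick worked example of the power-cycle delay units in the two PPS hunks above: the readout scales the PANEL_POWER_CYCLE_DELAY field from 100 ms units to the driver's internal 100 us units, and the write-back divides it back down and adds 1 (the comment explaining the matching readout-side offset is truncated at the hunk boundary above). The values here are illustrative:

#include <stdio.h>

/* Same rounding helper as the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int field = 6;				/* hw field, 100 ms units */
	unsigned int power_cycle = field * 1000;	/* 100 us units: 600 ms */

	/* write-back as in the hunk: back to 100 ms units, plus the +1
	 * offset paired with the readout adjustment elided above */
	unsigned int wb = DIV_ROUND_UP(power_cycle, 1000) + 1;

	printf("power_cycle=%u00us, field written back=%u\n", power_cycle, wb);
	return 0;
}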
@@ -256,7 +256,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
assert_pll_disabled(display, pipe);
}
- intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
+ intel_lvds_pps_init_hw(display, &lvds_encoder->init_pps);
temp = lvds_encoder->init_lvds_val;
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
@@ -296,7 +296,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the TRANSCONF reg.
*/
- if (DISPLAY_VER(i915) == 4) {
+ if (DISPLAY_VER(display) == 4) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
@@ -312,7 +312,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- intel_de_write(i915, lvds_encoder->reg, temp);
+ intel_de_write(display, lvds_encoder->reg, temp);
}
/*
@@ -323,16 +323,16 @@ static void intel_enable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN);
+ intel_de_rmw(display, lvds_encoder->reg, 0, LVDS_PORT_EN);
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), 0, PANEL_POWER_ON);
- intel_de_posting_read(dev_priv, lvds_encoder->reg);
+ intel_de_rmw(display, PP_CONTROL(display, 0), 0, PANEL_POWER_ON);
+ intel_de_posting_read(display, lvds_encoder->reg);
- if (intel_de_wait_for_set(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_set(display, PP_STATUS(display, 0), PP_ON, 5000))
+ drm_err(display->drm,
"timed out waiting for panel to power on\n");
intel_backlight_enable(crtc_state, conn_state);
@@ -343,16 +343,16 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), PANEL_POWER_ON, 0);
- if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 1000))
- drm_err(&dev_priv->drm,
+ intel_de_rmw(display, PP_CONTROL(display, 0), PANEL_POWER_ON, 0);
+ if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_ON, 1000))
+ drm_err(display->drm,
"timed out waiting for panel to power off\n");
- intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0);
- intel_de_posting_read(dev_priv, lvds_encoder->reg);
+ intel_de_rmw(display, lvds_encoder->reg, LVDS_PORT_EN, 0);
+ intel_de_posting_read(display, lvds_encoder->reg);
}
static void gmch_disable_lvds(struct intel_atomic_state *state,
@@ -384,10 +384,10 @@ static void pch_post_disable_lvds(struct intel_atomic_state *state,
static void intel_lvds_shutdown(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
+ drm_err(display->drm,
"timed out waiting for panel power cycle delay\n");
}
@@ -420,6 +420,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct intel_connector *connector = lvds_encoder->attached_connector;
@@ -429,8 +430,8 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
int ret;
/* Should never happen!! */
- if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) {
- drm_err(&i915->drm, "Can't support LVDS on pipe A\n");
+ if (DISPLAY_VER(display) < 4 && crtc->pipe == 0) {
+ drm_err(display->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -447,7 +448,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"forcing display bpp (was %d) to LVDS (%d)\n",
crtc_state->pipe_bpp, lvds_bpp);
crtc_state->pipe_bpp = lvds_bpp;
@@ -775,11 +776,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
+struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS)
return encoder;
}
@@ -787,15 +788,16 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_i915_private *i915)
+bool intel_is_dual_link_lvds(struct intel_display *display)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(i915);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(display);
return encoder && to_lvds_encoder(encoder)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
+ struct intel_display *display = to_intel_display(&lvds_encoder->base);
struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev);
struct intel_connector *connector = lvds_encoder->attached_connector;
const struct drm_display_mode *fixed_mode =
@@ -803,8 +805,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
unsigned int val;
/* use the module option value if specified */
- if (i915->display.params.lvds_channel_mode > 0)
- return i915->display.params.lvds_channel_mode == 2;
+ if (display->params.lvds_channel_mode > 0)
+ return display->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
@@ -819,7 +821,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = intel_de_read(i915, lvds_encoder->reg);
+ val = intel_de_read(display, lvds_encoder->reg);
if (HAS_PCH_CPT(i915))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
@@ -837,14 +839,14 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @i915: i915 device
+ * @display: display device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_i915_private *i915)
+void intel_lvds_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_lvds_encoder *lvds_encoder;
struct intel_connector *connector;
const struct drm_edid *drm_edid;
@@ -855,13 +857,13 @@ void intel_lvds_init(struct drm_i915_private *i915)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support,
+ drm_WARN(display->drm, !display->vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!i915->display.vbt.int_lvds_support) {
- drm_dbg_kms(&i915->drm,
+ if (!display->vbt.int_lvds_support) {
+ drm_dbg_kms(display->drm,
"Internal LVDS support disabled by VBT\n");
return;
}
@@ -871,7 +873,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
else
lvds_reg = LVDS;
- lvds = intel_de_read(i915, lvds_reg);
+ lvds = intel_de_read(display, lvds_reg);
if (HAS_PCH_SPLIT(i915)) {
if ((lvds & LVDS_DETECTED) == 0)
@@ -881,11 +883,11 @@ void intel_lvds_init(struct drm_i915_private *i915)
ddc_pin = GMBUS_PIN_PANEL;
if (!intel_bios_is_lvds_present(display, &ddc_pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"LVDS is not present in VBT\n");
return;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"LVDS is not present in VBT, but enabled anyway\n");
}
@@ -902,12 +904,12 @@ void intel_lvds_init(struct drm_i915_private *i915)
lvds_encoder->attached_connector = connector;
encoder = &lvds_encoder->base;
- drm_connector_init_with_ddc(&i915->drm, &connector->base,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS,
intel_gmbus_get_adapter(display, ddc_pin));
- drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
encoder->enable = intel_enable_lvds;
@@ -931,7 +933,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
encoder->port = PORT_NONE;
encoder->cloneable = 0;
- if (DISPLAY_VER(i915) < 4)
+ if (DISPLAY_VER(display) < 4)
encoder->pipe_mask = BIT(PIPE_B);
else
encoder->pipe_mask = ~0;
@@ -943,7 +945,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
intel_lvds_add_properties(&connector->base);
- intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps);
+ intel_lvds_pps_get_hw_state(display, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;
/*
@@ -958,7 +960,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
drm_edid = drm_edid_read_switcheroo(&connector->base, connector->base.ddc);
else
@@ -991,7 +993,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
if (!intel_panel_preferred_fixed_mode(connector))
intel_panel_add_encoder_fixed_mode(connector, encoder);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
if (!intel_panel_preferred_fixed_mode(connector))
@@ -1002,7 +1004,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
intel_backlight_setup(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n",
+ drm_dbg_kms(display->drm, "detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -1010,7 +1012,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
return;
failed:
- drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n");
+ drm_dbg_kms(display->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(&connector->base);
drm_encoder_cleanup(&encoder->base);
kfree(lvds_encoder);
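
compute_is_dual_link_lvds() above reduces to three ordered checks: an explicit module parameter wins, a pixel clock above 112 MHz forces dual link, and otherwise the decision falls back to what the LVDS register was programmed to. A minimal sketch of that order, with assumed inputs standing in for the register readout the hunk truncates:

#include <stdbool.h>

/* Assumed inputs: lvds_channel_mode mirrors display->params.lvds_channel_mode,
 * clock_khz the fixed mode clock, reg_dual the dual-channel state read back
 * from the LVDS register (that readout is cut off in the hunk above). */
static bool example_is_dual_link(int lvds_channel_mode, int clock_khz,
				 bool reg_dual)
{
	/* use the module option value if specified */
	if (lvds_channel_mode > 0)
		return lvds_channel_mode == 2;

	/* single channel LVDS is limited to 112 MHz */
	if (clock_khz > 112999)
		return true;

	return reg_dual;
}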
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h
index 7ad5fa9c0434..a6db1706a97c 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.h
+++ b/drivers/gpu/drm/i915/display/intel_lvds.h
@@ -11,28 +11,28 @@
#include "i915_reg_defs.h"
enum pipe;
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe);
-void intel_lvds_init(struct drm_i915_private *dev_priv);
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
+void intel_lvds_init(struct intel_display *display);
+struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display);
+bool intel_is_dual_link_lvds(struct intel_display *display);
#else
-static inline bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+static inline bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe)
{
return false;
}
-static inline void intel_lvds_init(struct drm_i915_private *dev_priv)
+static inline void intel_lvds_init(struct intel_display *display)
{
}
-static inline struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+static inline struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display)
{
return NULL;
}
-static inline bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+static inline bool intel_is_dual_link_lvds(struct intel_display *display)
{
return false;
}
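
intel_lvds.h above shows the stub convention these headers use: real prototypes under #ifdef I915, no-op static inlines otherwise, so configurations built without the feature still compile and link. A sketch of the pattern with hypothetical names:

/* All names here are hypothetical; only the pattern matches the header. */
struct intel_display;

#ifdef I915
bool example_port_enabled(struct intel_display *display);
#else
static inline bool example_port_enabled(struct intel_display *display)
{
	return false;	/* feature compiled out: behave as "not enabled" */
}
#endif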
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 312b21b1ab59..9e963bce340f 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -155,9 +155,8 @@ static void reset_crtc_encoder_state(struct intel_crtc *crtc)
static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -169,7 +168,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
reset_crtc_encoder_state(crtc);
intel_fbc_disable(crtc);
- intel_update_watermarks(i915);
+ intel_update_watermarks(display);
intel_display_power_put_all_in_set(display, &crtc->enabled_power_domains);
@@ -821,18 +820,18 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
- if (crtc_state->hw.active) {
- /*
- * The initial mode needs to be set in order to keep
- * the atomic core happy. It wants a valid mode if the
- * crtc's enabled, so we do the above call.
- *
- * But we don't set all the derived state fully, hence
- * set a flag to indicate that a full recalculation is
- * needed on the next commit.
- */
- crtc_state->inherited = true;
+ /*
+ * The initial mode needs to be set in order to keep
+ * the atomic core happy. It wants a valid mode if the
+ * crtc's enabled, so we do the above call.
+ *
+ * But we don't set all the derived state fully, hence
+ * set a flag to indicate that a full recalculation is
+ * needed on the next commit.
+ */
+ crtc_state->inherited = true;
+ if (crtc_state->hw.active) {
intel_crtc_update_active_timings(crtc_state,
crtc_state->vrr.enable);
@@ -874,7 +873,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
/* TODO move here (or even earlier?) on all platforms */
if (DISPLAY_VER(display) >= 9)
- intel_wm_get_hw_state(i915);
+ intel_wm_get_hw_state(display);
intel_bw_update_hw_state(display);
intel_cdclk_update_hw_state(display);
@@ -947,7 +946,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
/* HW state is read out, now we need to sanitize this mess. */
get_encoder_power_domains(i915);
- intel_pch_sanitize(i915);
+ intel_pch_sanitize(display);
intel_cmtg_sanitize(display);
@@ -988,8 +987,8 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
/* TODO move earlier on all platforms */
if (DISPLAY_VER(display) < 9)
- intel_wm_get_hw_state(i915);
- intel_wm_sanitize(i915);
+ intel_wm_get_hw_state(display);
+ intel_wm_sanitize(display);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 99f6d6f53fa7..b909ed18a5b2 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -20,9 +20,11 @@
#include "intel_pps.h"
#include "intel_sdvo.h"
-bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
return HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) ||
(HAS_PCH_LPT_H(i915) && pch_transcoder == PIPE_A);
}
@@ -37,11 +39,11 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
return crtc->pipe;
}
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_dp_disabled(struct intel_display *display,
enum pipe pipe, enum port port,
i915_reg_t dp_reg)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe port_pipe;
bool state;
@@ -57,11 +59,11 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
port_name(port));
}
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_hdmi_disabled(struct intel_display *display,
enum pipe pipe, enum port port,
i915_reg_t hdmi_reg)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe port_pipe;
bool state;
@@ -77,15 +79,14 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
port_name(port));
}
-static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_ports_disabled(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
- assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
+ assert_pch_dp_disabled(display, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(display, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(display, pipe, PORT_D, PCH_DP_D);
INTEL_DISPLAY_STATE_WARN(display,
intel_crt_port_enabled(display, PCH_ADPA, &port_pipe) && port_pipe == pipe,
@@ -93,20 +94,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe,
+ intel_lvds_port_enabled(display, PCH_LVDS, &port_pipe) && port_pipe == pipe,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
/* PCH SDVOB multiplex with HDMIB */
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
+ assert_pch_hdmi_disabled(display, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(display, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(display, pipe, PORT_D, PCH_HDMID);
}
-static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_transcoder_disabled(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 val;
bool enabled;
@@ -117,45 +117,45 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_hdmi_port(struct intel_display *display,
enum port port, i915_reg_t hdmi_reg)
{
- u32 val = intel_de_read(dev_priv, hdmi_reg);
+ u32 val = intel_de_read(display, hdmi_reg);
if (val & SDVO_ENABLE ||
(val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sanitizing transcoder select for HDMI %c\n",
port_name(port));
val &= ~SDVO_PIPE_SEL_MASK;
val |= SDVO_PIPE_SEL(PIPE_A);
- intel_de_write(dev_priv, hdmi_reg, val);
+ intel_de_write(display, hdmi_reg, val);
}
-static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_dp_port(struct intel_display *display,
enum port port, i915_reg_t dp_reg)
{
- u32 val = intel_de_read(dev_priv, dp_reg);
+ u32 val = intel_de_read(display, dp_reg);
if (val & DP_PORT_EN ||
(val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sanitizing transcoder select for DP %c\n",
port_name(port));
val &= ~DP_PIPE_SEL_MASK;
val |= DP_PIPE_SEL(PIPE_A);
- intel_de_write(dev_priv, dp_reg, val);
+ intel_de_write(display, dp_reg, val);
}
-static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+static void ibx_sanitize_pch_ports(struct intel_display *display)
{
/*
* The BIOS may select transcoder B on some of the PCH
@@ -168,14 +168,14 @@ static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
* (see intel_dp_link_down(), intel_disable_hdmi(),
* intel_disable_sdvo()).
*/
- ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+ ibx_sanitize_pch_dp_port(display, PORT_B, PCH_DP_B);
+ ibx_sanitize_pch_dp_port(display, PORT_C, PCH_DP_C);
+ ibx_sanitize_pch_dp_port(display, PORT_D, PCH_DP_D);
/* PCH SDVOB multiplex with HDMIB */
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+ ibx_sanitize_pch_hdmi_port(display, PORT_B, PCH_HDMIB);
+ ibx_sanitize_pch_hdmi_port(display, PORT_C, PCH_HDMIC);
+ ibx_sanitize_pch_hdmi_port(display, PORT_D, PCH_HDMID);
}
static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
@@ -225,31 +225,30 @@ void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder)));
-
- intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HTOTAL(pch_transcoder),
+ intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HBLANK(pch_transcoder),
+ intel_de_read(display, TRANS_HBLANK(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HSYNC(pch_transcoder),
+ intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder)));
+
+ intel_de_write(display, PCH_TRANS_VTOTAL(pch_transcoder),
+ intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VBLANK(pch_transcoder),
+ intel_de_read(display, TRANS_VBLANK(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VSYNC(pch_transcoder),
+ intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ intel_de_read(display, TRANS_VSYNCSHIFT(display, cpu_transcoder)));
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
@@ -326,18 +325,18 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
assert_fdi_rx_disabled(display, pipe);
/* Ports must be off as well */
- assert_pch_ports_disabled(dev_priv, pipe);
+ assert_pch_ports_disabled(display, pipe);
reg = PCH_TRANSCONF(pipe);
- intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0);
+ intel_de_rmw(display, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+ if (intel_de_wait_for_clear(display, reg, TRANS_STATE_ENABLE, 50))
+ drm_err(display->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
if (HAS_PCH_CPT(dev_priv))
/* Workaround: Clear the timing override chicken bit again. */
- intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
+ intel_de_rmw(display, TRANS_CHICKEN2(pipe),
TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
@@ -366,14 +365,14 @@ void ilk_pch_pre_enable(struct intel_atomic_state *state,
void ilk_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
u32 temp;
- assert_pch_transcoder_disabled(dev_priv, pipe);
+ assert_pch_transcoder_disabled(display, pipe);
/* For PCH output, training FDI link */
intel_fdi_link_train(crtc, crtc_state);
@@ -459,23 +458,28 @@ void ilk_pch_disable(struct intel_atomic_state *state,
void ilk_pch_post_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
ilk_disable_pch_transcoder(crtc);
if (HAS_PCH_CPT(dev_priv)) {
/* disable TRANS_DP_CTL */
- intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe),
+ intel_de_rmw(display, TRANS_DP_CTL(pipe),
TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK,
TRANS_DP_PORT_SEL_NONE);
/* disable DPLL_SEL */
- intel_de_rmw(dev_priv, PCH_DPLL_SEL,
+ intel_de_rmw(display, PCH_DPLL_SEL,
TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0);
}
ilk_fdi_pll_disable(crtc);
+
+ intel_disable_shared_dpll(old_crtc_state);
}
static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
@@ -497,8 +501,8 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum pipe pipe = crtc->pipe;
@@ -550,8 +554,6 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val, pipeconf_val;
@@ -559,49 +561,49 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
assert_fdi_tx_enabled(display, (enum pipe)cpu_transcoder);
assert_fdi_rx_enabled(display, PIPE_A);
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(display, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(display, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder));
+ pipeconf_val = intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder));
if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK)
val |= TRANS_INTERLACE_INTERLACED;
else
val |= TRANS_INTERLACE_PROGRESSIVE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
- if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
+ intel_de_write(display, LPT_TRANSCONF, val);
+ if (intel_de_wait_for_set(display, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 100))
- drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
+ drm_err(display->drm, "Failed to enable PCH transcoder\n");
}
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+static void lpt_disable_pch_transcoder(struct intel_display *display)
{
- intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0);
+ intel_de_rmw(display, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
+ if (intel_de_wait_for_clear(display, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
+ drm_err(display->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
+ intel_de_rmw(display, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void lpt_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- assert_pch_transcoder_disabled(dev_priv, PIPE_A);
+ assert_pch_transcoder_disabled(display, PIPE_A);
lpt_program_iclkip(crtc_state);
@@ -614,36 +616,38 @@ void lpt_pch_enable(struct intel_atomic_state *state,
void lpt_pch_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- lpt_disable_pch_transcoder(dev_priv);
+ lpt_disable_pch_transcoder(display);
- lpt_disable_iclkip(dev_priv);
+ lpt_disable_iclkip(display);
}
void lpt_pch_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
+ if ((intel_de_read(display, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
return;
crtc_state->has_pch_encoder = true;
- tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ tmp = intel_de_read(display, FDI_RX_CTL(PIPE_A));
crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
&crtc_state->fdi_m_n);
- crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+ crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(display);
}
-void intel_pch_sanitize(struct drm_i915_private *i915)
+void intel_pch_sanitize(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (HAS_PCH_IBX(i915))
- ibx_sanitize_pch_ports(i915);
+ ibx_sanitize_pch_ports(display);
}
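
The conversion pattern running through these hunks: display-side functions take struct intel_display * instead of struct drm_i915_private *, re-derive the i915 pointer via to_i915(display->drm) only where PCH or GT helpers still need it, and replace IS_*() platform macros with display->platform.* flags. A minimal before/after sketch, assuming a hypothetical FOO_CTL register and FOO_ENABLE bit (not real i915 symbols):

/* Before: all accesses routed through the device-private pointer. */
static void foo_enable(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, FOO_CTL, FOO_ENABLE);

	if (IS_BROADWELL(dev_priv))
		drm_dbg_kms(&dev_priv->drm, "applying bdw quirk\n");
}

/* After: struct intel_display is the primary handle; dev_priv is
 * re-derived only for helpers that still require it (here the PCH
 * type check), and platform checks use the display->platform flags. */
static void foo_enable(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (!HAS_PCH_IBX(i915))
		return;

	intel_de_write(display, FOO_CTL, FOO_ENABLE);

	if (display->platform.broadwell)
		drm_dbg_kms(display->drm, "applying bdw quirk\n");
}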
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h
index 35f8288af3d1..cd6b3ed05887 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.h
@@ -9,14 +9,14 @@
#include <linux/types.h>
enum pipe;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_link_m_n;
#ifdef I915
-bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -41,9 +41,9 @@ void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
-void intel_pch_sanitize(struct drm_i915_private *i915);
+void intel_pch_sanitize(struct intel_display *display);
#else
-static inline bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+static inline bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder)
{
return false;
@@ -90,7 +90,7 @@ static inline void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
}
-static inline void intel_pch_sanitize(struct drm_i915_private *i915)
+static inline void intel_pch_sanitize(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 33467de3d115..1307a478861a 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -11,27 +11,28 @@
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
-static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
+static void lpt_fdi_reset_mphy(struct intel_display *display)
{
- intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
+ intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
- if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
+ drm_err(display->drm, "FDI mPHY reset assert timeout\n");
- intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
+ intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
- if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
+ drm_err(display->drm, "FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
-static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
+static void lpt_fdi_program_mphy(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
- lpt_fdi_reset_mphy(dev_priv);
+ lpt_fdi_reset_mphy(display);
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
@@ -103,11 +104,12 @@ static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 temp;
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
+ intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_GATE);
intel_sbi_lock(dev_priv);
@@ -175,24 +177,25 @@ int lpt_iclkip(const struct intel_crtc_state *crtc_state)
/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
struct iclkip_params p;
u32 temp;
- lpt_disable_iclkip(dev_priv);
+ lpt_disable_iclkip(display);
lpt_compute_iclkip(&p, clock);
- drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);
+ drm_WARN_ON(display->drm, lpt_iclkip_freq(&p) != clock);
/* This should not happen with any sane values */
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
+ drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
+ drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
@@ -224,15 +227,16 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
/* Wait for initialization time */
udelay(24);
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+ intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
-int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+int lpt_get_iclkip(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct iclkip_params p;
u32 temp;
- if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ if ((intel_de_read(display, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
iclkip_params_init(&p);
@@ -268,15 +272,16 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
* - Sequence to enable CLKOUT_DP without spread
* - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
*/
-static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+static void lpt_enable_clkout_dp(struct intel_display *display,
bool with_spread, bool with_fdi)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
- if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ if (drm_WARN(display->drm, with_fdi && !with_spread,
"FDI requires downspread\n"))
with_spread = true;
- if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ if (drm_WARN(display->drm, HAS_PCH_LPT_LP(dev_priv) &&
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
@@ -295,7 +300,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
if (with_fdi)
- lpt_fdi_program_mphy(dev_priv);
+ lpt_fdi_program_mphy(display);
}
reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
@@ -307,8 +312,9 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
}
/* Sequence to disable CLKOUT_DP */
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+void lpt_disable_clkout_dp(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
intel_sbi_lock(dev_priv);
@@ -364,15 +370,16 @@ static const u16 sscdivintphase[] = {
* < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
* change in clock period = -(steps / 10) * 5.787 ps
*/
-static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
+static void lpt_bend_clkout_dp(struct intel_display *display, int steps)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
int idx = BEND_IDX(steps);
- if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
+ if (drm_WARN_ON(display->drm, steps % 5 != 0))
return;
- if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
+ if (drm_WARN_ON(display->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
intel_sbi_lock(dev_priv);
@@ -393,10 +400,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
#undef BEND_IDX
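
Sanity-checking the bend formula from the comment above (plain arithmetic, not driver code): for steps = +10 the change in clock period is -(10 / 10) * 5.787 ps = -5.787 ps, i.e. a shorter period and a slightly faster clock; for steps = -45 it is -(-45 / 10) * 5.787 ps = +26.04 ps, a longer period and a slower clock. Both match the stated sign convention (steps < 0 slows the clock, steps > 0 speeds it up), and lpt_bend_clkout_dp() rejects any steps value that is not a multiple of 5.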
-static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+static bool spll_uses_pch_ssc(struct intel_display *display)
{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 ctl = intel_de_read(display, SPLL_CTL);
if ((ctl & SPLL_PLL_ENABLE) == 0)
return false;
@@ -405,18 +412,17 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
- if (IS_BROADWELL(dev_priv) &&
+ if (display->platform.broadwell &&
(ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
return true;
return false;
}
-static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
- enum intel_dpll_id id)
+static bool wrpll_uses_pch_ssc(struct intel_display *display, enum intel_dpll_id id)
{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 ctl = intel_de_read(display, WRPLL_CTL(id));
if ((ctl & WRPLL_PLL_ENABLE) == 0)
return false;
@@ -424,7 +430,7 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
return true;
- if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
+ if ((display->platform.broadwell || display->platform.haswell_ult) &&
(ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
@@ -432,12 +438,12 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
return false;
}
-static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void lpt_init_pch_refclk(struct intel_display *display)
{
struct intel_encoder *encoder;
bool has_fdi = false;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_fdi = true;
@@ -462,37 +468,37 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
- dev_priv->display.dpll.pch_ssc_use = 0;
+ display->dpll.pch_ssc_use = 0;
- if (spll_uses_pch_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
+ if (spll_uses_pch_ssc(display)) {
+ drm_dbg_kms(display->drm, "SPLL using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
+ if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL1)) {
+ drm_dbg_kms(display->drm, "WRPLL1 using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
+ if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL2)) {
+ drm_dbg_kms(display->drm, "WRPLL2 using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (dev_priv->display.dpll.pch_ssc_use)
+ if (display->dpll.pch_ssc_use)
return;
if (has_fdi) {
- lpt_bend_clkout_dp(dev_priv, 0);
- lpt_enable_clkout_dp(dev_priv, true, true);
+ lpt_bend_clkout_dp(display, 0);
+ lpt_enable_clkout_dp(display, true, true);
} else {
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
}
}
-static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void ilk_init_pch_refclk(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
int i;
@@ -607,7 +613,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(display) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
+ drm_dbg_kms(display->drm, "Using SSC on panel\n");
val |= DREF_SSC1_ENABLE;
} else {
val &= ~DREF_SSC1_ENABLE;
@@ -623,7 +629,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
if (intel_panel_use_ssc(display) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
} else {
@@ -670,10 +676,12 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/*
* Initialize reference clocks when the driver loads
*/
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+void intel_init_pch_refclk(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ilk_init_pch_refclk(dev_priv);
+ ilk_init_pch_refclk(display);
else if (HAS_PCH_LPT(dev_priv))
- lpt_init_pch_refclk(dev_priv);
+ lpt_init_pch_refclk(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
index ae3403c0ced8..25cc53c568bc 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
@@ -8,25 +8,25 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
#ifdef I915
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct intel_display *display);
+int lpt_get_iclkip(struct intel_display *display);
int lpt_iclkip(const struct intel_crtc_state *crtc_state);
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+void intel_init_pch_refclk(struct intel_display *display);
+void lpt_disable_clkout_dp(struct intel_display *display);
#else
static inline void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
}
-static inline void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+static inline void lpt_disable_iclkip(struct intel_display *display)
{
}
-static inline int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+static inline int lpt_get_iclkip(struct intel_display *display)
{
return 0;
}
@@ -34,10 +34,10 @@ static inline int lpt_iclkip(const struct intel_crtc_state *crtc_state)
{
return 0;
}
-static inline void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+static inline void intel_init_pch_refclk(struct intel_display *display)
{
}
-static inline void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+static inline void lpt_disable_clkout_dp(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 10e26c3db946..6182f484b5bd 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -75,7 +75,7 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_auto_source(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source)
{
@@ -85,8 +85,8 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
- drm_modeset_lock_all(&dev_priv->drm);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ drm_modeset_lock_all(display->drm);
+ for_each_intel_encoder(display->drm, encoder) {
if (!encoder->base.crtc)
continue;
@@ -113,7 +113,7 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_DP_D;
break;
default:
- drm_WARN(&dev_priv->drm, 1, "nonexisting DP port %c\n",
+ drm_WARN(display->drm, 1, "nonexisting DP port %c\n",
port_name(dig_port->base.port));
break;
}
@@ -122,10 +122,10 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
}
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
}
-static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int vlv_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -133,7 +133,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ i9xx_pipe_crc_auto_source(display, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
@@ -148,7 +148,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_CHERRYVIEW(dev_priv))
+ if (!display->platform.cherryview)
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
need_stable_symbols = true;
@@ -170,7 +170,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv));
+ u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display));
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@@ -186,26 +186,26 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
default:
return -EINVAL;
}
- intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp);
+ intel_de_write(display, PORT_DFT2_G4X(display), tmp);
}
return 0;
}
-static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int i9xx_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ i9xx_pipe_crc_auto_source(display, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
break;
case INTEL_PIPE_CRC_SOURCE_TV:
- if (!SUPPORTS_TV(dev_priv))
+ if (!SUPPORTS_TV(display))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
@@ -229,10 +229,10 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+static void vlv_undo_pipe_scramble_reset(struct intel_display *display,
enum pipe pipe)
{
- u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv));
+ u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display));
switch (pipe) {
case PIPE_A:
@@ -249,7 +249,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
- intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp);
+ intel_de_write(display, PORT_DFT2_G4X(display), tmp);
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -281,18 +281,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
static void
intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
- i915gm_irq_cstate_wa(dev_priv, enable);
+ if (display->platform.i945gm || display->platform.i915gm)
+ i915gm_irq_cstate_wa(display, enable);
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(&dev_priv->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state) {
ret = -ENOMEM;
goto unlock;
@@ -311,7 +311,7 @@ retry:
pipe_config->uapi.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
- if (IS_HASWELL(dev_priv) &&
+ if (display->platform.haswell &&
pipe_config->hw.active && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->uapi.mode_changed = true;
@@ -327,13 +327,13 @@ put_state:
drm_atomic_state_put(state);
unlock:
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Toggling workaround to %i returns %i\n", enable, ret);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
-static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int ivb_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -361,7 +361,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int skl_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -404,22 +404,22 @@ static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int get_new_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source, u32 *val)
{
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
return i8xx_pipe_crc_ctl_reg(source, val);
- else if (DISPLAY_VER(dev_priv) < 5)
- return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
+ else if (DISPLAY_VER(display) < 5)
+ return i9xx_pipe_crc_ctl_reg(display, pipe, source, val);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ return vlv_pipe_crc_ctl_reg(display, pipe, source, val);
+ else if (display->platform.ironlake || display->platform.sandybridge)
return ilk_pipe_crc_ctl_reg(source, val);
- else if (DISPLAY_VER(dev_priv) < 9)
- return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (DISPLAY_VER(display) < 9)
+ return ivb_pipe_crc_ctl_reg(display, pipe, source, val);
else
- return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ return skl_pipe_crc_ctl_reg(display, pipe, source, val);
}
static int
@@ -447,7 +447,7 @@ void intel_crtc_crc_init(struct intel_crtc *crtc)
spin_lock_init(&pipe_crc->lock);
}
-static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i8xx_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -459,7 +459,7 @@ static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i9xx_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -472,7 +472,7 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
+static int vlv_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -487,7 +487,7 @@ static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ilk_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -501,7 +501,7 @@ static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ivb_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -515,7 +515,7 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+static int skl_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -535,21 +535,21 @@ static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
}
static int
-intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
+intel_is_valid_crc_source(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
- if (DISPLAY_VER(dev_priv) == 2)
- return i8xx_crc_source_valid(dev_priv, source);
- else if (DISPLAY_VER(dev_priv) < 5)
- return i9xx_crc_source_valid(dev_priv, source);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_crc_source_valid(dev_priv, source);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
- return ilk_crc_source_valid(dev_priv, source);
- else if (DISPLAY_VER(dev_priv) < 9)
- return ivb_crc_source_valid(dev_priv, source);
+ if (DISPLAY_VER(display) == 2)
+ return i8xx_crc_source_valid(display, source);
+ else if (DISPLAY_VER(display) < 5)
+ return i9xx_crc_source_valid(display, source);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ return vlv_crc_source_valid(display, source);
+ else if (display->platform.ironlake || display->platform.sandybridge)
+ return ilk_crc_source_valid(display, source);
+ else if (DISPLAY_VER(display) < 9)
+ return ivb_crc_source_valid(display, source);
else
- return skl_crc_source_valid(dev_priv, source);
+ return skl_crc_source_valid(display, source);
}
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@@ -562,16 +562,16 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum intel_pipe_crc_source source;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ drm_dbg_kms(display->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
- intel_is_valid_crc_source(dev_priv, source) == 0) {
+ intel_is_valid_crc_source(display, source) == 0) {
*values_cnt = 5;
return 0;
}
@@ -583,7 +583,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
@@ -594,14 +593,14 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ drm_dbg_kms(display->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
power_domain = POWER_DOMAIN_PIPE(pipe);
wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -610,17 +609,17 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
if (enable)
intel_crtc_crc_setup_workarounds(crtc, true);
- ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ ret = get_new_crc_ctl_reg(display, pipe, &source, &val);
if (ret != 0)
goto out;
pipe_crc->source = source;
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), val);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
if (!source) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+ if (display->platform.valleyview || display->platform.cherryview)
+ vlv_undo_pipe_scramble_reset(display, pipe);
}
pipe_crc->skipped = 0;
@@ -636,7 +635,7 @@ out:
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
u32 val = 0;
@@ -644,19 +643,20 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
if (!crtc->base.crc.opened)
return;
- if (get_new_crc_ctl_reg(dev_priv, pipe, &pipe_crc->source, &val) < 0)
+ if (get_new_crc_ctl_reg(display, pipe, &pipe_crc->source, &val) < 0)
return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */
pipe_crc->skipped = 0;
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), val);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
}
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
@@ -665,7 +665,7 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
pipe_crc->skipped = INT_MIN;
spin_unlock_irq(&pipe_crc->lock);
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), 0);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), 0);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
intel_synchronize_irq(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index b1675b46e06c..b0c4892775ce 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -52,44 +52,55 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
return false;
}
+static enum intel_memory_type
+initial_plane_memory_type(struct drm_i915_private *i915)
+{
+ if (IS_DGFX(i915))
+ return INTEL_MEMORY_LOCAL;
+ else if (HAS_LMEMBAR_SMEM_STOLEN(i915))
+ return INTEL_MEMORY_STOLEN_LOCAL;
+ else
+ return INTEL_MEMORY_STOLEN_SYSTEM;
+}
+
static bool
-initial_plane_phys_lmem(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
+initial_plane_phys(struct intel_display *display,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_i915_private *i915 = to_i915(display->drm);
- gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_memory_region *mem;
+ enum intel_memory_type mem_type;
+ bool is_present, is_local;
dma_addr_t dma_addr;
- gen8_pte_t pte;
u32 base;
+ mem_type = initial_plane_memory_type(i915);
+ mem = intel_memory_region_by_type(i915, mem_type);
+ if (!mem) {
+ drm_dbg_kms(display->drm,
+ "Initial plane memory region (type %s) not initialized\n",
+ intel_memory_type_str(mem_type));
+ return false;
+ }
+
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
- gte += base / I915_GTT_PAGE_SIZE;
+ dma_addr = intel_ggtt_read_entry(&ggtt->vm, base, &is_present, &is_local);
- pte = ioread64(gte);
- if (!(pte & GEN12_GGTT_PTE_LM)) {
+ if (!is_present) {
drm_err(display->drm,
- "Initial plane programming missing PTE_LM bit\n");
+ "Initial plane FB PTE not present\n");
return false;
}
- dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK;
-
- if (IS_DGFX(i915))
- mem = i915->mm.regions[INTEL_REGION_LMEM_0];
- else
- mem = i915->mm.stolen_region;
- if (!mem) {
- drm_dbg_kms(display->drm,
- "Initial plane memory region not initialized\n");
+ if (intel_memory_type_is_local(mem->type) != is_local) {
+ drm_err(display->drm,
+ "Initial plane FB PTE unsuitable for %s\n",
+ mem->region.name);
return false;
}
- /*
- * On lmem we don't currently expect this to
- * ever be placed in the stolen portion.
- */
if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
drm_err(display->drm,
"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
@@ -107,42 +118,6 @@ initial_plane_phys_lmem(struct intel_display *display,
return true;
}
-static bool
-initial_plane_phys_smem(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_memory_region *mem;
- u32 base;
-
- base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
-
- mem = i915->mm.stolen_region;
- if (!mem) {
- drm_dbg_kms(display->drm,
- "Initial plane memory region not initialized\n");
- return false;
- }
-
- /* FIXME get and validate the dma_addr from the PTE */
- plane_config->phys_base = base;
- plane_config->mem = mem;
-
- return true;
-}
-
-static bool
-initial_plane_phys(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
- return initial_plane_phys_lmem(display, plane_config);
- else
- return initial_plane_phys_smem(display, plane_config);
-}
-
static struct i915_vma *
initial_plane_vma(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
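
The reworked initial_plane_phys() above reduces to four checks: pick the memory region the platform is expected to use, look up the GGTT entry the firmware programmed for the framebuffer, and bail unless the PTE is present, its locality matches the region, and the address lands inside the region. Condensed into one hypothetical helper (validate_initial_fb() is illustrative; the helpers it calls are the ones used in the hunk above):

static bool validate_initial_fb(struct intel_display *display, u32 base)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_memory_region *mem;
	bool is_present, is_local;
	dma_addr_t dma_addr;

	/* DGFX -> LMEM, LMEMBAR stolen -> stolen-local, else stolen-system */
	mem = intel_memory_region_by_type(i915, initial_plane_memory_type(i915));
	if (!mem)
		return false; /* region not initialized, nothing to validate */

	/* one PTE read replaces the old open-coded GEN12_GGTT_PTE_LM probing */
	dma_addr = intel_ggtt_read_entry(&ggtt->vm, base, &is_present, &is_local);

	return is_present &&
	       intel_memory_type_is_local(mem->type) == is_local &&
	       dma_addr >= mem->region.start && dma_addr <= mem->region.end;
}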
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 63301a01906c..d22b5469672d 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -5,6 +5,8 @@
#include <linux/bitops.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 617ce4993172..4d4e2b9f5f2d 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -91,7 +91,6 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
bool pll_enabled, release_cl_override = false;
@@ -134,7 +133,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = display->platform.cherryview &&
!chv_phy_powergate_ch(display, phy, ch, true);
- if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(display))) {
+ if (vlv_force_pll_on(display, pipe, vlv_get_dpll(display))) {
drm_err(display->drm,
"Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
@@ -158,7 +157,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
intel_de_posting_read(display, intel_dp->output_reg);
if (!pll_enabled) {
- vlv_force_pll_off(dev_priv, pipe);
+ vlv_force_pll_off(display, pipe);
if (release_cl_override)
chv_phy_powergate_ch(display, phy, ch, false);
@@ -744,11 +743,11 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&display->pps.mutex);
-
if (!intel_dp_is_edp(intel_dp))
return false;
+ lockdep_assert_held(&display->pps.mutex);
+
cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
intel_dp->pps.want_panel_vdd = true;
@@ -925,11 +924,11 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&display->pps.mutex);
-
if (!intel_dp_is_edp(intel_dp))
return;
+ lockdep_assert_held(&display->pps.mutex);
+
INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
"[ENCODER:%d:%s] %s VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
@@ -1855,7 +1854,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ intel_lvds_port_enabled(display, PCH_LVDS, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPA:
g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe);
@@ -1883,7 +1882,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
drm_WARN_ON(display->drm,
port_sel != PANEL_PORT_SELECT_LVDS);
- intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
+ intel_lvds_port_enabled(display, LVDS, &panel_pipe);
}
val = intel_de_read(display, pp_reg);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 4e938bad808c..eef48c014112 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -36,6 +36,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -463,8 +464,8 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
if (DISPLAY_VER(display) >= 9) {
u32 val;
- val = intel_de_rmw(dev_priv,
- PSR_EVENT(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ PSR_EVENT(display, cpu_transcoder),
0, 0);
psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
@@ -689,7 +690,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 aux_clock_divider, aux_ctl;
/* write DP_SET_POWER=D0 */
@@ -704,7 +704,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- intel_de_write(dev_priv,
+ intel_de_write(display,
psr_aux_data_reg(display, cpu_transcoder, i >> 2),
intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
@@ -839,7 +839,6 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val = 0;
if (DISPLAY_VER(display) >= 11)
@@ -873,7 +872,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
* WA 0479: hsw,bdw
* "Do not skip both TP1 and TP2/TP3"
*/
- if (DISPLAY_VER(dev_priv) < 9 &&
+ if (DISPLAY_VER(display) < 9 &&
connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
val |= EDP_PSR_TP2_TP3_TIME_100us;
@@ -909,7 +908,6 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 max_sleep_time = 0x1f;
u32 val = EDP_PSR_ENABLE;
@@ -919,7 +917,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (DISPLAY_VER(display) < 20)
val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (intel_dp->psr.link_standby)
@@ -1013,14 +1011,13 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
val |= EDP_SU_TRACK_ENABLE;
if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
@@ -1038,7 +1035,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -1103,9 +1100,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool
transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
else if (DISPLAY_VER(display) >= 12)
return cpu_transcoder == TRANSCODER_A;
@@ -1183,10 +1178,9 @@ dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = dig_port->base.port;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return pipe <= PIPE_B && port <= PORT_B;
else
return pipe == PIPE_A && port == PORT_A;
@@ -1197,7 +1191,6 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
struct i915_power_domains *power_domains = &display->power.domains;
u32 exit_scanlines;
@@ -1223,7 +1216,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
return;
/*
@@ -1264,7 +1257,6 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
@@ -1286,7 +1278,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
* For other platforms with SW tracking we can adjust the y coordinates
* to match sink requirement if multiple of 4.
*/
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
y_granularity = intel_dp->psr.su_y_granularity;
else if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = 4;
@@ -1412,7 +1404,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
@@ -1421,20 +1412,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
/* JSL and EHL only supports eDP 1.3 */
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ if (display->platform.jasperlake || display->platform.elkhartlake) {
drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
return false;
}
/* Wa_16011181250 */
- if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
- IS_DG2(dev_priv)) {
+ if (display->platform.rocketlake || display->platform.alderlake_s ||
+ display->platform.dg2) {
drm_dbg_kms(display->drm,
"PSR2 is defeatured for this platform\n");
return false;
}
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
drm_dbg_kms(display->drm,
"PSR2 not completely functional in this stepping\n");
return false;
@@ -1453,7 +1444,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
+ (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
drm_dbg_kms(display->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
@@ -1486,7 +1477,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
- IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
drm_dbg_kms(display->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
@@ -1604,6 +1595,12 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->crc_enabled) {
+ drm_dbg_kms(display->drm,
+ "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -1634,12 +1631,6 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
if (!alpm_config_valid(intel_dp, crtc_state, true))
return false;
- if (crtc_state->crc_enabled) {
- drm_dbg_kms(display->drm,
- "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
- return false;
- }
-
return true;
}
@@ -1827,7 +1818,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask = 0;
@@ -1866,7 +1856,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* As a workaround leave LPSP unmasked to prevent PSR entry
* when external displays are active.
*/
- if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
+ if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
mask |= EDP_PSR_DEBUG_MASK_LPSP;
if (DISPLAY_VER(display) < 20)
@@ -1880,7 +1870,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
/* allow PSR with sprite enabled */
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
}
@@ -1925,7 +1915,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
*/
if (!intel_dp->psr.panel_replay_enabled &&
(IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(dev_priv)))
+ display->platform.alderlake_p))
intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
@@ -1936,7 +1926,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
0,
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (display->platform.alderlake_p)
intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
}
@@ -2024,7 +2014,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_psr_enable_source(intel_dp, crtc_state);
intel_dp->psr.enabled = true;
- intel_dp->psr.paused = false;
+ intel_dp->psr.pause_counter = 0;
/*
* Link_ok is sticky and set here on PSR enable. We can assume link
@@ -2104,7 +2094,6 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
lockdep_assert_held(&intel_dp->psr.lock);
@@ -2136,7 +2125,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_de_rmw(display,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (display->platform.alderlake_p)
intel_de_rmw(display, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
@@ -2210,7 +2199,6 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2223,12 +2211,10 @@ void intel_psr_pause(struct intel_dp *intel_dp)
return;
}
- /* If we ever hit this, we will need to add refcount to pause/resume */
- drm_WARN_ON(display->drm, psr->paused);
-
- intel_psr_exit(intel_dp);
- intel_psr_wait_exit_locked(intel_dp);
- psr->paused = true;
+ if (intel_dp->psr.pause_counter++ == 0) {
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
+ }
mutex_unlock(&psr->lock);
@@ -2244,6 +2230,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
*/
void intel_psr_resume(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2251,13 +2238,18 @@ void intel_psr_resume(struct intel_dp *intel_dp)
mutex_lock(&psr->lock);
- if (!psr->paused)
- goto unlock;
+ if (!psr->enabled)
+ goto out;
- psr->paused = false;
- intel_psr_activate(intel_dp);
+ if (!psr->pause_counter) {
+ drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
+ goto out;
+ }
-unlock:
+ if (--intel_dp->psr.pause_counter == 0)
+ intel_psr_activate(intel_dp);
+
+out:
mutex_unlock(&psr->lock);
}
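
The hunks above turn psr.paused (a bool, guarded by a WARN when pause was hit twice) into psr.pause_counter, so pause/resume nests like a refcount: only the 0->1 pause deactivates PSR, only the matching 1->0 resume reactivates it, and an unbalanced resume just warns. The shape of the pattern, reduced to a sketch (psr_activate()/psr_deactivate() and the struct are illustrative, not the driver's types):

struct psr {
	struct mutex lock;
	unsigned int pause_counter;
};

void psr_pause(struct psr *psr)
{
	mutex_lock(&psr->lock);
	if (psr->pause_counter++ == 0)	/* first pause does the real work */
		psr_deactivate(psr);
	mutex_unlock(&psr->lock);
}

void psr_resume(struct psr *psr)
{
	mutex_lock(&psr->lock);
	if (!psr->pause_counter)
		pr_warn("Unbalanced PSR pause/resume!\n");
	else if (--psr->pause_counter == 0)	/* last resume reactivates */
		psr_activate(psr);
	mutex_unlock(&psr->lock);
}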
@@ -2314,35 +2306,27 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
PSR2_MAN_TRK_CTL_ENABLE;
}
static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}
static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
@@ -2405,8 +2389,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
bool full_update)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = man_trk_ctl_enable_bit_get(display);
/* SF partial frame enable has to be set even on full update */
@@ -2420,7 +2402,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
@@ -2474,13 +2456,12 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
- (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
+ (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
y_alignment = vdsc_cfg->slice_height;
else
y_alignment = crtc_state->su_y_granularity;
@@ -2601,12 +2582,11 @@ static void
intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
/* Wa_14014971492 */
if (!crtc_state->has_panel_replay &&
((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) &&
+ display->platform.alderlake_p || display->platform.tigerlake)) &&
crtc_state->splitter.enable)
crtc_state->psr2_su_area.y1 = 0;
@@ -2807,7 +2787,6 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
@@ -2839,7 +2818,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
new_crtc_state->has_sel_update != psr->sel_update_enabled ||
new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
- (DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled))
+ (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
intel_psr_disable_locked(intel_dp);
else if (new_crtc_state->wm_level_disabled)
/* Wa_14015648006 */
@@ -3322,7 +3301,7 @@ void intel_psr_flush(struct intel_display *display,
* we have to ensure that the PSR is not activated until
* intel_psr_resume() is called.
*/
- if (intel_dp->psr.paused)
+ if (intel_dp->psr.pause_counter)
goto unlock;
if (origin == ORIGIN_FLIP ||
@@ -3634,8 +3613,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
const char *status = "unknown";
u32 val, status_val;
- if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
- intel_dp->psr.panel_replay_enabled)) {
+ if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
+ (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
@@ -3728,10 +3707,9 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool enabled;
u32 val, psr2_ctl;
@@ -3740,7 +3718,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
if (!(psr->sink_support || psr->sink_panel_replay_support))
return 0;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&psr->lock);
intel_psr_print_mode(intel_dp, m);
@@ -3822,7 +3800,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
unlock:
mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -3853,9 +3831,7 @@ static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct intel_display *display = data;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
- intel_wakeref_t wakeref;
int ret = -ENODEV;
if (!HAS_PSR(display))
@@ -3866,12 +3842,9 @@ i915_edp_psr_debug_set(void *data, u64 val)
drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
// TODO: split to each transcoder's PSR debug state
- ret = intel_psr_debug_set(intel_dp, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ ret = intel_psr_debug_set(intel_dp, val);
}
return ret;
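The debugfs setter above trades an explicit get/put pair for the scoped with_intel_display_rpm() helper, so the reference cannot leak on an early return inside the block. Scoped guards like this are typically built as one-shot for loops; a generic sketch with stand-in rpm_get()/rpm_put() helpers, not the kernel's actual definition:

struct rpm_ref;

struct rpm_ref *rpm_get(void *dev);
void rpm_put(void *dev, struct rpm_ref *ref);

#define with_rpm(dev, ref) \
	for ((ref) = rpm_get(dev); (ref); rpm_put((dev), (ref)), (ref) = NULL)

/*
 * usage:
 *	struct rpm_ref *ref;
 *
 *	with_rpm(dev, ref)
 *		do_hw_access(dev);
 */

The loop body runs exactly once while the reference is held, and the put is emitted on the single pass out of the loop.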
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 6e2d9929b4d7..757b9ce7e3b1 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2036,7 +2036,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
struct intel_display *display = to_intel_display(&intel_sdvo->base);
u16 hotplug;
- if (!I915_HAS_HOTPLUG(display))
+ if (!HAS_HOTPLUG(display))
return 0;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index b9acd9fe160c..2b53ac9f4935 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,6 +5,8 @@
#include <linux/math.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_ddi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b8d14ed8a56e..c1014e74791f 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -3,8 +3,10 @@
* Copyright © 2019 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -92,11 +94,6 @@ static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
return dig_port->tc;
}
-static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
-{
- return to_i915(tc->dig_port->base.base.dev);
-}
-
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
enum tc_port_mode mode)
{
@@ -219,10 +216,11 @@ __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain doma
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
+ struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
- drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
+ drm_WARN_ON(display->drm, tc->lock_power_domain != domain);
#endif
__tc_cold_unblock(tc, domain, wakeref);
}
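The __maybe_unused annotation above exists because display is only consumed when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled; without it, the other configuration would trip -Wunused-variable. The same pattern in miniature, using a made-up config symbol and a hypothetical debug-only check:

#define sketch_maybe_unused __attribute__((unused))

static void check_domain(void *dev)
{
	void * sketch_maybe_unused debug_dev = dev;

#if defined(CONFIG_SKETCH_EXTRA_CHECKS)
	assert_domain_held(debug_dev);	/* compiled out otherwise */
#endif
}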
@@ -266,13 +264,13 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc)
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 lane_mask;
- lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
- drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
+ drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
@@ -281,13 +279,13 @@ static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 pin_mask;
- pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));
+ pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia));
- drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
+ drm_WARN_ON(display->drm, pin_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
@@ -297,13 +295,12 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
intel_wakeref_t wakeref;
u32 val, pin_assignment;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
pin_assignment =
REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
@@ -369,7 +366,7 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
@@ -377,10 +374,10 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
assert_tc_cold_blocked(tc);
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return lnl_tc_port_get_max_lane_count(dig_port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return mtl_tc_port_get_max_lane_count(dig_port);
return intel_tc_port_get_max_lane_count(dig_port);
@@ -389,20 +386,20 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool lane_reversal = dig_port->lane_reversal;
u32 val;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
lane_reversal && tc->mode != TC_PORT_LEGACY);
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);
switch (required_lanes) {
@@ -423,16 +420,16 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
MISSING_CASE(required_lanes);
}
- intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
+ intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
u32 live_status_mask)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 valid_hpd_mask;
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
if (hweight32(live_status_mask) != 1)
return;
@@ -447,7 +444,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
return;
/* If live status mismatches the VBT flag, trust the live status. */
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
tc->port_name, live_status_mask, valid_hpd_mask);
@@ -490,21 +487,20 @@ icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
- u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
intel_wakeref_t wakeref;
u32 fia_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) {
- fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
- pch_isr = intel_de_read(i915, SDEISR);
+ fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ pch_isr = intel_de_read(display, SDEISR);
}
if (fia_isr == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, nothing connected\n",
tc->port_name);
return mask;
@@ -531,14 +527,14 @@ static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
*/
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assuming not ready\n",
tc->port_name);
return false;
@@ -550,14 +546,14 @@ static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, can't %s ownership\n",
tc->port_name, take ? "take" : "release");
@@ -568,21 +564,21 @@ static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
if (take)
val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
- intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
+ intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
return true;
}
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assume not owned\n",
tc->port_name);
return false;
@@ -619,30 +615,30 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
int max_lanes;
max_lanes = intel_tc_port_max_lane_count(dig_port);
if (tc->mode == TC_PORT_LEGACY) {
- drm_WARN_ON(&i915->drm, max_lanes != 4);
+ drm_WARN_ON(display->drm, max_lanes != 4);
return true;
}
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT);
/*
* Now we have to re-check the live state, in case the port recently
* became disconnected. Not necessary for legacy mode.
*/
if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+ drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n",
tc->port_name);
return false;
}
if (max_lanes < required_lanes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY max lanes %d < required lanes %d\n",
tc->port_name,
max_lanes, required_lanes);
@@ -655,7 +651,7 @@ static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
tc->lock_wakeref = tc_cold_block(tc);
@@ -664,8 +660,8 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
if ((!tc_phy_is_ready(tc) ||
!icl_tc_phy_take_ownership(tc, true)) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n",
tc->port_name,
str_yes_no(tc_phy_is_ready(tc)));
goto out_unblock_tc_cold;
@@ -733,14 +729,13 @@ tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
intel_wakeref_t wakeref;
u32 val;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref)
- val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));
+ val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));
- drm_WARN_ON(&i915->drm, val == 0xffffffff);
+ drm_WARN_ON(display->drm, val == 0xffffffff);
tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
@@ -775,19 +770,18 @@ adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
- u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
- u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 cpu_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
- pch_isr = intel_de_read(i915, SDEISR);
+ cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
+ pch_isr = intel_de_read(display, SDEISR);
}
if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
@@ -810,15 +804,15 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
*/
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
u32 val;
assert_display_core_power_enabled(tc);
- val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assuming not ready\n",
tc->port_name);
return false;
@@ -830,12 +824,12 @@ static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
assert_tc_port_power_enabled(tc);
- intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
return true;
@@ -843,13 +837,13 @@ static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
u32 val;
assert_tc_port_power_enabled(tc);
- val = intel_de_read(i915, DDI_BUF_CTL(port));
+ val = intel_de_read(display, DDI_BUF_CTL(port));
return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -872,7 +866,6 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
intel_wakeref_t port_wakeref;
@@ -885,15 +878,15 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
port_wakeref = intel_display_power_get(display, port_power_domain);
if (!adlp_tc_phy_take_ownership(tc, true) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n",
tc->port_name);
goto out_put_port_power;
}
if (!tc_phy_is_ready(tc) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: PHY not ready\n",
tc->port_name);
goto out_release_phy;
}
@@ -965,19 +958,18 @@ static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
- u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
- u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 pica_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
- pch_isr = intel_de_read(i915, SDEISR);
+ pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
+ pch_isr = intel_de_read(display, SDEISR);
}
if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
@@ -994,22 +986,22 @@ static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
+ return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE;
}
static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: timeout waiting for TCSS power to get %s\n",
tc->port_name,
str_enabled_disabled(enabled));
@@ -1069,7 +1061,7 @@ static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool ena
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
__xelpdp_tc_phy_enable_tcss_power(tc, enable);
@@ -1082,7 +1074,7 @@ static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enabl
return true;
out_disable:
- if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
+ if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY))
return false;
if (!enable)
@@ -1096,35 +1088,35 @@ out_disable:
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, reg);
+ val = intel_de_read(display, reg);
if (take)
val |= XELPDP_TC_PHY_OWNERSHIP;
else
val &= ~XELPDP_TC_PHY_OWNERSHIP;
- intel_de_write(i915, reg, val);
+ intel_de_write(display, reg, val);
}
static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
+ return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP;
}
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
intel_wakeref_t tc_cold_wref;
enum intel_display_power_domain domain;
@@ -1134,7 +1126,7 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
if (tc->mode != TC_PORT_DISCONNECTED)
tc->lock_wakeref = tc_cold_block(tc);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
(tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
!xelpdp_tc_phy_tcss_power_is_enabled(tc));
@@ -1207,13 +1199,13 @@ tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 mask;
mask = tc->phy_ops->hpd_live_status(tc);
/* The sink can be connected only in a single mode. */
- drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
+ drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1);
return mask;
}
@@ -1236,9 +1228,9 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
bool phy_is_ready, bool phy_is_owned)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
- drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
+ drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
return phy_is_ready && phy_is_owned;
}
@@ -1246,8 +1238,7 @@ static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
static bool tc_phy_is_connected(struct intel_tc_port *tc,
enum icl_port_dpll_id port_pll_type)
{
- struct intel_encoder *encoder = &tc->dig_port->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(tc->dig_port);
bool phy_is_ready = tc_phy_is_ready(tc);
bool phy_is_owned = tc_phy_is_owned(tc);
bool is_connected;
@@ -1257,7 +1248,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
else
is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
tc->port_name,
str_yes_no(is_connected),
@@ -1270,10 +1261,10 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
if (wait_for(tc_phy_is_ready(tc), 500)) {
- drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
+ drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
tc->port_name);
return false;
@@ -1343,7 +1334,7 @@ get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
bool phy_is_ready;
bool phy_is_owned;
@@ -1363,11 +1354,11 @@ tc_phy_get_current_mode(struct intel_tc_port *tc)
if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
} else {
- drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
+ drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
tc->port_name,
tc_port_mode_name(mode),
@@ -1407,7 +1398,7 @@ tc_phy_get_target_mode(struct intel_tc_port *tc)
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 live_status_mask = tc_phy_hpd_live_status(tc);
bool connected;
@@ -1421,7 +1412,7 @@ static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
connected = tc->phy_ops->connect(tc, required_lanes);
}
- drm_WARN_ON(&i915->drm, !connected);
+ drm_WARN_ON(display->drm, !connected);
}
static void tc_phy_disconnect(struct intel_tc_port *tc)
@@ -1491,12 +1482,12 @@ static void __intel_tc_port_put_link(struct intel_tc_port *tc)
static bool tc_port_is_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
assert_tc_port_power_enabled(tc);
- return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
+ return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) &
DDI_BUF_CTL_ENABLE;
}
@@ -1509,15 +1500,15 @@ static bool tc_port_is_enabled(struct intel_tc_port *tc)
*/
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool update_mode = false;
mutex_lock(&tc->lock);
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, tc->lock_wakeref);
- drm_WARN_ON(&i915->drm, tc->link_refcount);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->lock_wakeref);
+ drm_WARN_ON(display->drm, tc->link_refcount);
tc_phy_get_hw_state(tc);
/*
@@ -1540,8 +1531,8 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
if (!tc_port_is_enabled(tc)) {
update_mode = true;
} else if (tc->mode == TC_PORT_DISCONNECTED) {
- drm_WARN_ON(&i915->drm, !tc->legacy_port);
- drm_err(&i915->drm,
+ drm_WARN_ON(display->drm, !tc->legacy_port);
+ drm_err(display->drm,
"Port %s: PHY disconnected on enabled port, connecting it\n",
tc->port_name);
update_mode = true;
@@ -1556,28 +1547,28 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
mutex_unlock(&tc->lock);
}
-static bool tc_port_has_active_links(struct intel_tc_port *tc,
- const struct intel_crtc_state *crtc_state)
+static bool tc_port_has_active_streams(struct intel_tc_port *tc,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
- int active_links = 0;
+ int active_streams = 0;
if (dig_port->dp.is_mst) {
/* TODO: get the PLL type for MST, once HW readout is done for it. */
- active_links = intel_dp_mst_encoder_active_links(dig_port);
+ active_streams = intel_dp_mst_active_streams(&dig_port->dp);
} else if (crtc_state && crtc_state->hw.active) {
pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
- active_links = 1;
+ active_streams = 1;
}
- if (active_links && !tc_phy_is_connected(tc, pll_type))
- drm_err(&i915->drm,
- "Port %s: PHY disconnected with %d active link(s)\n",
- tc->port_name, active_links);
+ if (active_streams && !tc_phy_is_connected(tc, pll_type))
+ drm_err(display->drm,
+ "Port %s: PHY disconnected with %d active stream(s)\n",
+ tc->port_name, active_streams);
- return active_links;
+ return active_streams;
}
/**
@@ -1595,13 +1586,13 @@ static bool tc_port_has_active_links(struct intel_tc_port *tc,
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
mutex_lock(&tc->lock);
- drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
- if (!tc_port_has_active_links(tc, crtc_state)) {
+ drm_WARN_ON(display->drm, tc->link_refcount != 1);
+ if (!tc_port_has_active_streams(tc, crtc_state)) {
/*
* TBT-alt is the default mode in any case the PHY ownership is not
* held (regardless of the sink's connected live state), so
@@ -1610,7 +1601,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
*/
if (tc->init_mode != TC_PORT_TBT_ALT &&
tc->init_mode != TC_PORT_DISCONNECTED)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
tc->port_name,
tc_port_mode_name(tc->init_mode));
@@ -1618,7 +1609,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
__intel_tc_port_put_link(tc);
}
- drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n",
tc->port_name,
tc_port_mode_name(tc->mode));
@@ -1637,12 +1628,12 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
*/
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 mask = ~0;
- drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
if (tc->mode != TC_PORT_DISCONNECTED)
mask = BIT(tc->mode);
@@ -1677,14 +1668,14 @@ static int reset_link_commit(struct intel_tc_port *tc,
struct intel_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
struct intel_crtc *crtc;
u8 pipe_mask;
int ret;
- ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx);
if (ret)
return ret;
@@ -1695,7 +1686,7 @@ static int reset_link_commit(struct intel_tc_port *tc,
if (!pipe_mask)
return 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state;
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
@@ -1713,13 +1704,13 @@ static int reset_link_commit(struct intel_tc_port *tc,
static int reset_link(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *_state;
struct intel_atomic_state *state;
int ret;
- _state = drm_atomic_state_alloc(&i915->drm);
+ _state = drm_atomic_state_alloc(display->drm);
if (!_state)
return -ENOMEM;
@@ -1738,21 +1729,21 @@ static void intel_tc_port_link_reset_work(struct work_struct *work)
{
struct intel_tc_port *tc =
container_of(work, struct intel_tc_port, link_reset_work.work);
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
int ret;
if (!__intel_tc_port_link_needs_reset(tc))
return;
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: TypeC DP-alt sink disconnected, resetting link\n",
tc->port_name);
ret = reset_link(tc);
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
}
bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
@@ -1780,7 +1771,7 @@ void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
static void __intel_tc_port_lock(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
mutex_lock(&tc->lock);
@@ -1790,9 +1781,8 @@ static void __intel_tc_port_lock(struct intel_tc_port *tc,
intel_tc_port_update_mode(tc, required_lanes,
false);
- drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
- !tc_phy_is_owned(tc));
+ drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc));
}
void intel_tc_port_lock(struct intel_digital_port *dig_port)
@@ -1885,12 +1875,12 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc;
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
- if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
+ if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE))
return -EINVAL;
tc = kzalloc(sizeof(*tc), GFP_KERNEL);
@@ -1900,11 +1890,11 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
dig_port->tc = tc;
tc->dig_port = dig_port;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
tc->phy_ops = &xelpdp_tc_phy_ops;
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
tc->phy_ops = &adlp_tc_phy_ops;
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
tc->phy_ops = &tgl_tc_phy_ops;
else
tc->phy_ops = &icl_tc_phy_ops;
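intel_tc_port_init() selects a per-generation ops table once at init time, so the hot paths above (connect, ownership, HPD live status) stay branch-free. A condensed sketch of the vtable-selection pattern, with placeholder types and names:

struct tc_phy_ops_sketch {
	int (*connect)(void *tc, int required_lanes);
	void (*disconnect)(void *tc);
};

static const struct tc_phy_ops_sketch xelpdp_ops, adlp_ops, tgl_ops, icl_ops;

static const struct tc_phy_ops_sketch *pick_phy_ops(int display_ver)
{
	/* newest platforms first, mirroring the dispatch above */
	if (display_ver >= 14)
		return &xelpdp_ops;
	if (display_ver >= 13)
		return &adlp_ops;
	if (display_ver >= 12)
		return &tgl_ops;
	return &icl_ops;
}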
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 5dbe857ea85b..2e3f3f0207e8 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1594,7 +1594,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Disable TV interrupts around load detect or we'll recurse */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
spin_lock_irq(&dev_priv->irq_lock);
- i915_disable_pipestat(dev_priv, 0,
+ i915_disable_pipestat(display, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1669,7 +1669,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, 0,
+ i915_enable_pipestat(display, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
spin_unlock_irq(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 4efd4f7d497a..139fa5deba80 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -222,12 +222,15 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
- if (DISPLAY_VER(display) == 2)
- return -1;
- else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return 2;
- else
+ if (DISPLAY_VER(display) >= 20 || display->platform.battlemage)
+ return 1;
+ else if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
+ return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ? 2 : 1;
+ else if (DISPLAY_VER(display) >= 3)
return 1;
+ else
+ return -1;
}
/*
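The rewritten ladder above returns how far ahead of the true position the hardware scanline counter reads: 1 on most platforms, 2 for HDMI from HSW/BDW up until Xe2, and -1 on gen2, where the counter lags instead. A sketch of how such an offset is conventionally applied, with wrap-around at vtotal (illustrative, not the driver's exact readout code):

static int corrected_scanline(int hw_scanline, int offset, int vtotal)
{
	/* offset may be negative (gen2 reads one line behind) */
	return (hw_scanline + offset + vtotal) % vtotal;
}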
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 3ed64c17bdff..8e799e225af1 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -9,6 +9,7 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "i915_utils.h"
#include "intel_crtc.h"
@@ -259,6 +260,15 @@ static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config
return 0;
}
+static bool is_dsi_dsc_1_1(struct intel_crtc_state *crtc_state)
+{
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+
+ return vdsc_cfg->dsc_version_major == 1 &&
+ vdsc_cfg->dsc_version_minor == 1 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI);
+}
+
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(pipe_config);
@@ -317,8 +327,19 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
* From XE_LPD onwards we support compression bpps in steps of 1
* up to uncompressed bpp-1, hence add calculations for all the rc
* parameters
+ *
+ * We don't want to calculate all rc parameters when the panel
+ * is MIPI DSI and it's using DSC 1.1, because some DSI panel
+ * vendors have hardcoded PPS params in the VBT, causing the
+ * parameters sent from the source, which are derived through
+ * interpolation, to differ from the params the panel expects.
+ * This causes visible noise in the display.
+ * Furthermore, for DSI panels we currently use a bits_per_pixel
+ * (compressed bpp) hardcoded from the VBT (unlike other encoders,
+ * where we find the optimum compressed bpp), so we don't need to
+ * rely on interpolation and can take the required rc parameters
+ * straight from the tables.
*/
- if (DISPLAY_VER(display) >= 13) {
+ if (DISPLAY_VER(display) >= 13 && !is_dsi_dsc_1_1(pipe_config)) {
calculate_rc_params(vdsc_cfg);
} else {
if ((compressed_bpp == 8 ||
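The guard added above keeps the driver on the pre-baked RC tables for DSI panels running DSC 1.1, so the PPS the source transmits matches what such panels hardcode in the VBT. The dispatch in miniature, with placeholder helpers standing in for the two paths:

struct rc_cfg_sketch {
	int dsc_major, dsc_minor, is_dsi;
};

static void rc_from_tables(struct rc_cfg_sketch *c) { (void)c; /* stock table entries */ }
static void rc_calculate(struct rc_cfg_sketch *c) { (void)c; /* interpolated per bpp */ }

static void pick_rc_params(struct rc_cfg_sketch *c, int display_ver)
{
	int dsi_dsc_1_1 = c->is_dsi && c->dsc_major == 1 && c->dsc_minor == 1;

	if (display_ver >= 13 && !dsi_dsc_1_1)
		rc_calculate(c);	/* fine-grained bpp steps need interpolation */
	else
		rc_from_tables(c);	/* panel expects the fixed table values */
}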
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index cac49319026d..633a66f6b73b 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -4,6 +4,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -32,6 +34,8 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
return false;
fallthrough;
case DRM_MODE_CONNECTOR_DisplayPort:
+ if (connector->mst.dp)
+ return false;
intel_dp = intel_attached_dp(connector);
if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd))
@@ -182,7 +186,8 @@ is_cmrr_frac_required(struct intel_crtc_state *crtc_state)
int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- if (!HAS_CMRR(display))
+ /* Avoid CMRR for now till we have VRR with fixed timings working */
+ if (!HAS_CMRR(display) || true)
return false;
actual_refresh_k =
@@ -222,6 +227,121 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required)
return vtotal;
}
+static
+void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->cmrr.enable = true;
+ /*
+ * TODO: Compute precise target refresh rate to determine
+ * if video_mode_required should be true. Currently set to
+ * false due to uncertainty about the precise target
+ * refresh rate.
+ */
+ crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
+ crtc_state->vrr.vmin = crtc_state->vrr.vmax;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+static
+void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->vrr.enable = true;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+/*
+ * For fixed refresh rate mode, Vmin, Vmax and Flipline are all set to
+ * the Vtotal value.
+ */
+static
+int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal;
+
+ if (DISPLAY_VER(display) >= 13)
+ return crtc_vtotal;
+ else
+ return crtc_vtotal -
+ intel_vrr_real_vblank_delay(crtc_state);
+}
+
+static
+int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_fixed_rr_vtotal(crtc_state);
+}
+
+static
+int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return intel_vrr_fixed_rr_vtotal(crtc_state) -
+ intel_vrr_flipline_offset(display);
+}
+
+static
+int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_fixed_rr_vtotal(crtc_state);
+}
+
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
+ intel_vrr_fixed_rr_vmin(crtc_state) - 1);
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
+ intel_vrr_fixed_rr_vmax(crtc_state) - 1);
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
+ intel_vrr_fixed_rr_flipline(crtc_state) - 1);
+}
+
+static
+void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
+{
+ /*
+ * For fixed rr, vmin = vmax = flipline.
+ * vmin is already set to crtc_vtotal; set vmax and flipline to match.
+ */
+ crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal;
+}
+
+static
+int intel_vrr_compute_vmin(struct intel_crtc_state *crtc_state)
+{
+ /*
+ * To make fixed rr and vrr work seamlessly, the guardband/pipeline full
+ * should be set such that it satisfies both the fixed and variable
+ * timings.
+ * For this, set vmin to crtc_vtotal. With this we never need to
+ * change anything to do with the guardband.
+ */
+ return crtc_state->hw.adjusted_mode.crtc_vtotal;
+}
+
+static
+int intel_vrr_compute_vmax(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmax;
+
+ vmax = adjusted_mode->crtc_clock * 1000 /
+ (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+ vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+ return vmax;
+}
+
void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -232,14 +352,9 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct intel_dp *intel_dp = intel_attached_dp(connector);
bool is_edp = intel_dp_is_edp(intel_dp);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
- /*
- * FIXME all joined pipes share the same transcoder.
- * Need to account for that during VRR toggle/push/etc.
- */
- if (crtc_state->joiner_pipes)
+ if (!HAS_VRR(display))
return;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -247,28 +362,40 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->vrr.in_range =
intel_vrr_is_in_range(connector, drm_mode_vrefresh(adjusted_mode));
- if (!crtc_state->vrr.in_range)
- return;
-
- if (HAS_LRR(display))
- crtc_state->update_lrr = true;
- vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
- adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
- vmax = adjusted_mode->crtc_clock * 1000 /
- (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+ /*
+ * Allow fixed refresh rate with VRR Timing Generator.
+ * For now, set vrr.in_range to false to allow fixed_rr but skip actual
+ * VRR and LRR.
+ * #TODO For actual VRR with joiner, we need to figure out how to
+ * correctly sequence transcoder level stuff vs. pipe level stuff
+ * in the commit.
+ */
+ if (crtc_state->joiner_pipes)
+ crtc_state->vrr.in_range = false;
- vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
- vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+ vmin = intel_vrr_compute_vmin(crtc_state);
- if (vmin >= vmax)
- return;
+ if (crtc_state->vrr.in_range) {
+ if (HAS_LRR(display))
+ crtc_state->update_lrr = true;
+ vmax = intel_vrr_compute_vmax(connector, adjusted_mode);
+ } else {
+ vmax = vmin;
+ }
crtc_state->vrr.vmin = vmin;
crtc_state->vrr.vmax = vmax;
crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+ if (crtc_state->uapi.vrr_enabled && vmin < vmax)
+ intel_vrr_compute_vrr_timings(crtc_state);
+ else if (is_cmrr_frac_required(crtc_state) && is_edp)
+ intel_vrr_compute_cmrr_timings(crtc_state);
+ else
+ intel_vrr_compute_fixed_rr_timings(crtc_state);
+
/*
* flipline determines the min vblank length the hardware will
* generate, and on ICL/TGL flipline>=vmin+1, hence we reduce
@@ -276,29 +403,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
*/
crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display);
- /*
- * When panel is VRR capable and userspace has
- * not enabled adaptive sync mode then Fixed Average
- * Vtotal mode should be enabled.
- */
- if (crtc_state->uapi.vrr_enabled) {
- crtc_state->vrr.enable = true;
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- } else if (is_cmrr_frac_required(crtc_state) && is_edp) {
- crtc_state->vrr.enable = true;
- crtc_state->cmrr.enable = true;
- /*
- * TODO: Compute precise target refresh rate to determine
- * if video_mode_required should be true. Currently set to
- * false due to uncertainty about the precise target
- * refresh Rate.
- */
- crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
- crtc_state->vrr.vmin = crtc_state->vrr.vmax;
- crtc_state->vrr.flipline = crtc_state->vrr.vmin;
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- }
-
if (HAS_AS_SDP(display)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
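To make the vmax arithmetic above concrete: intel_vrr_compute_vmax() stretches vtotal to hit the panel's minimum refresh rate, then clamps so it never drops below the nominal vtotal. With assumed numbers (a 148.5 MHz pixel clock, htotal 2200, a 48 Hz panel minimum and a 1125-line nominal vtotal), vmax comes out at 148500000 / (2200 * 48) = 1406 lines:

static int sketch_compute_vmax(int crtc_clock_khz, int htotal,
			       int min_vfreq, int crtc_vtotal)
{
	int vmax = crtc_clock_khz * 1000 / (htotal * min_vfreq);

	return vmax > crtc_vtotal ? vmax : crtc_vtotal;
}

vmin, by contrast, is now pinned to crtc_vtotal so the same guardband setting satisfies both fixed and variable timings.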
@@ -380,14 +484,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
lower_32_bits(crtc_state->cmrr.cmrr_n));
}
- intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- crtc_state->vrr.vmin - 1);
- intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- crtc_state->vrr.vmax - 1);
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(crtc_state));
- intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- crtc_state->vrr.flipline - 1);
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+
+ if (!intel_vrr_always_use_vrr_tg(display) && !crtc_state->vrr.enable)
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(crtc_state));
if (HAS_AS_SDP(display))
intel_de_write(display,
@@ -461,6 +562,17 @@ bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
return intel_de_read(display, TRANS_PUSH(display, cpu_transcoder)) & TRANS_PUSH_SEND;
}
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
+{
+ if (!HAS_VRR(display))
+ return false;
+
+ if (DISPLAY_VER(display) >= 30)
+ return true;
+
+ return false;
+}
+
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
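intel_vrr_always_use_vrr_tg() gates the new behavior: from display version 30 on, the VRR timing generator stays enabled even at a fixed refresh rate, so runtime enable/disable stop toggling TRANS_VRR_CTL and only the vmin/vmax/flipline timings change. The branch in miniature, with a stand-in MMIO helper:

static void write_trans_vrr_ctl_sketch(unsigned int val)
{
	(void)val;	/* MMIO write elided */
}

static void vrr_runtime_enable_sketch(int display_ver, unsigned int ctl_bits)
{
	if (display_ver >= 30)
		return;	/* TG already running; fixed-rr timings were set earlier */

	write_trans_vrr_ctl_sketch(ctl_bits);
}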
@@ -469,16 +581,25 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
if (!crtc_state->vrr.enable)
return;
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
+ crtc_state->vrr.vmin - 1);
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
+ crtc_state->vrr.vmax - 1);
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
+ crtc_state->vrr.flipline - 1);
+
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
- if (crtc_state->cmrr.enable) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
- trans_vrr_ctl(crtc_state));
- } else {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ if (crtc_state->cmrr.enable) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
+ trans_vrr_ctl(crtc_state));
+ } else {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ }
}
}
@@ -490,24 +611,77 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->vrr.enable)
return;
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(old_crtc_state));
+ intel_de_wait_for_clear(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
+ }
+
+ intel_vrr_set_fixed_rr_timings(old_crtc_state);
+}
+
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!HAS_VRR(display))
+ return;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(crtc_state));
+ return;
+ }
+
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
+ TRANS_PUSH_EN);
+
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(old_crtc_state));
- intel_de_wait_for_clear(display,
- TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+}
+
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!HAS_VRR(display))
+ return;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0);
+
+ intel_de_wait_for_clear(display, TRANS_VRR_STATUS(display, cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
}
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.flipline &&
+ crtc_state->vrr.flipline == crtc_state->vrr.vmax &&
+ crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state);
+}
+
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 trans_vrr_ctl, trans_vrr_vsync;
+ bool vrr_enable;
trans_vrr_ctl = intel_de_read(display,
TRANS_VRR_CTL(display, cpu_transcoder));
- crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
if (HAS_CMRR(display))
crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE);
@@ -536,6 +710,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(display,
TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
+ /*
+ * For platforms that always use the VRR Timing Generator, the
+ * VTOTAL.Vtotal bits are not filled. Since for these platforms
+ * TRANS_VRR_VMIN is always filled with crtc_vtotal, use it to get
+ * the vtotal for adjusted_mode.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->hw.adjusted_mode.crtc_vtotal =
+ intel_vrr_vmin_vtotal(crtc_state);
+
if (HAS_AS_SDP(display)) {
trans_vrr_vsync =
intel_de_read(display,
@@ -547,6 +731,18 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
}
}
+ vrr_enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->vrr.enable = vrr_enable && !intel_vrr_is_fixed_rr(crtc_state);
+ else
+ crtc_state->vrr.enable = vrr_enable;
+
+ /*
+ * #TODO: For both VRR and CMRR, the I915_MODE_FLAG_VRR flag is set in mode_flags.
+ * Since CMRR is currently disabled, set this flag for VRR for now.
+ * Need to keep this in mind while re-enabling CMRR.
+ */
if (crtc_state->vrr.enable)
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
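With an always-on timing generator, hardware readout can no longer take the enable bit at face value: a running TG with collapsed timings (flipline equal to vmax and to the vmin-derived flipline) is really running a fixed refresh rate. The decision the readout above makes, condensed into a standalone helper:

static int sketch_vrr_enabled(int hw_enable_bit, int flipline,
			      int vmin_flipline, int vmax, int always_on_tg)
{
	int fixed_rr = flipline &&
		       flipline == vmax &&
		       flipline == vmin_flipline;

	if (always_on_tg)
		return hw_enable_bit && !fixed_rr;

	return hw_enable_bit;	/* older platforms: enable bit is authoritative */
}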
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 514822577e8a..38bf9996b883 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -13,6 +13,7 @@ struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_dsb;
+struct intel_display;
bool intel_vrr_is_capable(struct intel_connector *connector);
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh);
@@ -35,5 +36,10 @@ int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display);
#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index f00f4cfc58e5..bba82e888db2 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -5,15 +5,18 @@
#include <linux/debugfs.h>
-#include "i915_drv.h"
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "i9xx_wm.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_wm.h"
#include "skl_watermark.h"
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
- * @i915: i915 device
+ * @display: display device
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
@@ -44,10 +47,10 @@
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_i915_private *i915)
+void intel_update_watermarks(struct intel_display *display)
{
- if (i915->display.funcs.wm->update_wm)
- i915->display.funcs.wm->update_wm(i915);
+ if (display->funcs.wm->update_wm)
+ display->funcs.wm->update_wm(display);
}
int intel_wm_compute(struct intel_atomic_state *state,
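Every wrapper in intel_wm.c follows the same optional-callback shape: the per-platform function table may leave a hook NULL, so the caller tests before dispatching. The pattern reduced to its essentials, with illustrative types:

struct wm_funcs_sketch {
	void (*update_wm)(void *display);
};

struct wm_display_sketch {
	const struct wm_funcs_sketch *wm;
};

static void update_watermarks_sketch(struct wm_display_sketch *display)
{
	if (display->wm->update_wm)
		display->wm->update_wm(display);
}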
@@ -64,10 +67,10 @@ int intel_wm_compute(struct intel_atomic_state *state,
bool intel_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->initial_watermarks) {
- i915->display.funcs.wm->initial_watermarks(state, crtc);
+ if (display->funcs.wm->initial_watermarks) {
+ display->funcs.wm->initial_watermarks(state, crtc);
return true;
}
@@ -77,41 +80,41 @@ bool intel_initial_watermarks(struct intel_atomic_state *state,
void intel_atomic_update_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->atomic_update_watermarks)
- i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
+ if (display->funcs.wm->atomic_update_watermarks)
+ display->funcs.wm->atomic_update_watermarks(state, crtc);
}
void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->optimize_watermarks)
- i915->display.funcs.wm->optimize_watermarks(state, crtc);
+ if (display->funcs.wm->optimize_watermarks)
+ display->funcs.wm->optimize_watermarks(state, crtc);
}
int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->compute_global_watermarks)
- return i915->display.funcs.wm->compute_global_watermarks(state);
+ if (display->funcs.wm->compute_global_watermarks)
+ return display->funcs.wm->compute_global_watermarks(state);
return 0;
}
-void intel_wm_get_hw_state(struct drm_i915_private *i915)
+void intel_wm_get_hw_state(struct intel_display *display)
{
- if (i915->display.funcs.wm->get_hw_state)
- return i915->display.funcs.wm->get_hw_state(i915);
+ if (display->funcs.wm->get_hw_state)
+ return display->funcs.wm->get_hw_state(display);
}
-void intel_wm_sanitize(struct drm_i915_private *i915)
+void intel_wm_sanitize(struct intel_display *display)
{
- if (i915->display.funcs.wm->sanitize)
- return i915->display.funcs.wm->sanitize(i915);
+ if (display->funcs.wm->sanitize)
+ return display->funcs.wm->sanitize(display);
}
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
@@ -137,16 +140,16 @@ bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
return plane_state->uapi.visible;
}
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+void intel_print_wm_latency(struct intel_display *display,
const char *name, const u16 wm[])
{
int level;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
unsigned int latency = wm[level];
if (latency == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s WM%d latency not provided\n",
name, level);
continue;
@@ -156,43 +159,43 @@ void intel_print_wm_latency(struct drm_i915_private *dev_priv,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
latency *= 10;
else if (level > 0)
latency *= 5;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s WM%d latency %u (%u.%u usec)\n", name, level,
wm[level], latency / 10, latency % 10);
}
}
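
A standalone sketch of the unit normalization above (the helper name is
hypothetical; the pre-ver-9 WM0 case assumes the raw value is already in
0.1 us units, as implied by the unscaled print above):

/* Convert a raw WM latency value to tenths of a microsecond for printing. */
static unsigned int wm_latency_to_tenths_us(int display_ver, int level,
					    unsigned int raw)
{
	if (display_ver >= 9)
		return raw * 10;	/* 1 us units on ver 9+ */
	if (level > 0)
		return raw * 5;		/* WM1+: 0.5 us units before ver 9 */
	return raw;			/* WM0: printed unscaled */
}

For example, a raw WM2 value of 4 on a pre-ver-9 platform prints as
"2.0 usec", while the same raw value on ver 9+ prints as "4.0 usec".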
-void intel_wm_init(struct drm_i915_private *i915)
+void intel_wm_init(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
- skl_wm_init(i915);
+ if (DISPLAY_VER(display) >= 9)
+ skl_wm_init(display);
else
- i9xx_wm_init(i915);
+ i9xx_wm_init(display);
}
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
int level;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
unsigned int latency = wm[level];
/*
* - WM1+ latency values in 0.5us units
* - latencies are in us on gen9/vlv/chv
*/
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.valleyview ||
+ display->platform.cherryview ||
+ display->platform.g4x)
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -201,18 +204,18 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
level, wm[level], latency / 10, latency % 10);
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.pri_latency;
+ latencies = display->wm.pri_latency;
wm_latency_show(m, latencies);
@@ -221,13 +224,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.spr_latency;
+ latencies = display->wm.spr_latency;
wm_latency_show(m, latencies);
@@ -236,13 +239,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.cur_latency;
+ latencies = display->wm.cur_latency;
wm_latency_show(m, latencies);
@@ -251,39 +254,39 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x)
return -ENODEV;
- return single_open(file, pri_wm_latency_show, dev_priv);
+ return single_open(file, pri_wm_latency_show, display);
}
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
return -ENODEV;
- return single_open(file, spr_wm_latency_show, dev_priv);
+ return single_open(file, spr_wm_latency_show, display);
}
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
return -ENODEV;
- return single_open(file, cur_wm_latency_show, dev_priv);
+ return single_open(file, cur_wm_latency_show, display);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp, u16 wm[8])
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 new[8] = {};
int level;
int ret;
@@ -300,15 +303,15 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
&new[0], &new[1], &new[2], &new[3],
&new[4], &new[5], &new[6], &new[7]);
- if (ret != dev_priv->display.wm.num_levels)
+ if (ret != display->wm.num_levels)
return -EINVAL;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
wm[level] = new[level];
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
return len;
}
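
A user-space usage sketch for this write path (file name as registered in
intel_wm_debugfs_register() below; the debugfs mount point and dri minor
number are assumptions typical of a default setup). The handler rejects the
write unless it parses exactly wm.num_levels values:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Eight space-separated latencies, one per WM level. */
	const char *vals = "2 4 10 19 21 22 26 31\n";
	int fd = open("/sys/kernel/debug/dri/0/i915_pri_wm_latency", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, vals, strlen(vals)) != (ssize_t)strlen(vals)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}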
@@ -317,13 +320,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.pri_latency;
+ latencies = display->wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -332,13 +335,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.spr_latency;
+ latencies = display->wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -347,13 +350,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.cur_latency;
+ latencies = display->wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -385,18 +388,18 @@ static const struct file_operations i915_cur_wm_latency_fops = {
.write = cur_wm_latency_write
};
-void intel_wm_debugfs_register(struct drm_i915_private *i915)
+void intel_wm_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_pri_wm_latency_fops);
+ display, &i915_pri_wm_latency_fops);
debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_spr_wm_latency_fops);
+ display, &i915_spr_wm_latency_fops);
debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_cur_wm_latency_fops);
+ display, &i915_cur_wm_latency_fops);
- skl_watermark_debugfs_register(i915);
+ skl_watermark_debugfs_register(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
index 7d3a447054b3..9ad4e9eae5ca 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.h
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -8,13 +8,13 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
-void intel_update_watermarks(struct drm_i915_private *i915);
+void intel_update_watermarks(struct intel_display *display);
int intel_wm_compute(struct intel_atomic_state *state,
struct intel_crtc *crtc);
bool intel_initial_watermarks(struct intel_atomic_state *state,
@@ -24,13 +24,13 @@ void intel_atomic_update_watermarks(struct intel_atomic_state *state,
void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_compute_global_watermarks(struct intel_atomic_state *state);
-void intel_wm_get_hw_state(struct drm_i915_private *i915);
-void intel_wm_sanitize(struct drm_i915_private *i915);
+void intel_wm_get_hw_state(struct intel_display *display);
+void intel_wm_sanitize(struct intel_display *display);
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void intel_print_wm_latency(struct drm_i915_private *i915,
+void intel_print_wm_latency(struct intel_display *display,
const char *name, const u16 wm[]);
-void intel_wm_init(struct drm_i915_private *i915);
-void intel_wm_debugfs_register(struct drm_i915_private *i915);
+void intel_wm_init(struct intel_display *display);
+void intel_wm_debugfs_register(struct intel_display *display);
#endif /* __INTEL_WM_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 70e550539bb2..8739195aba69 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -2689,22 +2689,24 @@ static const struct drm_plane_funcs tgl_plane_funcs = {
static void
skl_plane_enable_flip_done(struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ bdw_enable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
spin_unlock_irq(&i915->irq_lock);
}
static void
skl_plane_disable_flip_done(struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ bdw_disable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
spin_unlock_irq(&i915->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 621e97943542..8080f777910a 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -19,6 +19,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fixed.h"
@@ -34,7 +35,7 @@
*/
#define DSB_EXE_TIME 100
-static void skl_sagv_disable(struct drm_i915_private *i915);
+static void skl_sagv_disable(struct intel_display *display);
/* Stores plane specific WM parameters */
struct skl_wm_params {
@@ -69,23 +70,21 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display)
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
*/
-static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
+static bool skl_needs_memory_bw_wa(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9;
+ return DISPLAY_VER(display) == 9;
}
bool
-intel_has_sagv(struct drm_i915_private *i915)
+intel_has_sagv(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED;
}
static u32
-intel_sagv_block_time(struct drm_i915_private *i915)
+intel_sagv_block_time(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (DISPLAY_VER(display) >= 14) {
u32 val;
@@ -115,10 +114,8 @@ intel_sagv_block_time(struct drm_i915_private *i915)
}
}
-static void intel_sagv_init(struct drm_i915_private *i915)
+static void intel_sagv_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (!HAS_SAGV(display))
display->sagv.status = I915_SAGV_NOT_CONTROLLED;
@@ -127,14 +124,14 @@ static void intel_sagv_init(struct drm_i915_private *i915)
* For icl+ this was already determined by intel_bw_init_hw().
*/
if (DISPLAY_VER(display) < 11)
- skl_sagv_disable(i915);
+ skl_sagv_disable(display);
drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN);
- display->sagv.block_time_us = intel_sagv_block_time(i915);
+ display->sagv.block_time_us = intel_sagv_block_time(display);
drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), display->sagv.block_time_us);
+ str_yes_no(intel_has_sagv(display)), display->sagv.block_time_us);
/* avoid overflow when adding with wm0 latency/etc. */
if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
@@ -142,7 +139,7 @@ static void intel_sagv_init(struct drm_i915_private *i915)
display->sagv.block_time_us))
display->sagv.block_time_us = 0;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
display->sagv.block_time_us = 0;
}
@@ -157,17 +154,18 @@ static void intel_sagv_init(struct drm_i915_private *i915)
* - All planes can enable watermarks for latencies >= SAGV engine block time
* - We're not using an interlaced display configuration
*/
-static void skl_sagv_enable(struct drm_i915_private *i915)
+static void skl_sagv_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (i915->display.sagv.status == I915_SAGV_ENABLED)
+ if (display->sagv.status == I915_SAGV_ENABLED)
return;
- drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
+ drm_dbg_kms(display->drm, "Enabling SAGV\n");
ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
@@ -177,29 +175,30 @@ static void skl_sagv_enable(struct drm_i915_private *i915)
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
*/
- if (IS_SKYLAKE(i915) && ret == -ENXIO) {
- drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ if (display->platform.skylake && ret == -ENXIO) {
+ drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
return;
} else if (ret < 0) {
- drm_err(&i915->drm, "Failed to enable SAGV\n");
+ drm_err(display->drm, "Failed to enable SAGV\n");
return;
}
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
}
-static void skl_sagv_disable(struct drm_i915_private *i915)
+static void skl_sagv_disable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (i915->display.sagv.status == I915_SAGV_DISABLED)
+ if (display->sagv.status == I915_SAGV_DISABLED)
return;
- drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
+ drm_dbg_kms(display->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
@@ -209,47 +208,47 @@ static void skl_sagv_disable(struct drm_i915_private *i915)
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
*/
- if (IS_SKYLAKE(i915) && ret == -ENXIO) {
- drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ if (display->platform.skylake && ret == -ENXIO) {
+ drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
return;
} else if (ret < 0) {
- drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
+ drm_err(display->drm, "Failed to disable SAGV (%d)\n", ret);
return;
}
- i915->display.sagv.status = I915_SAGV_DISABLED;
+ display->sagv.status = I915_SAGV_DISABLED;
}
static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state =
intel_atomic_get_new_bw_state(state);
if (!new_bw_state)
return;
- if (!intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_disable(i915);
+ if (!intel_can_enable_sagv(display, new_bw_state))
+ skl_sagv_disable(display);
}
static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state =
intel_atomic_get_new_bw_state(state);
if (!new_bw_state)
return;
- if (intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_enable(i915);
+ if (intel_can_enable_sagv(display, new_bw_state))
+ skl_sagv_enable(display);
}
static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state =
intel_atomic_get_old_bw_state(state);
const struct intel_bw_state *new_bw_state =
@@ -267,7 +266,7 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_bw_state->base.changed);
- drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
old_mask, new_mask);
/*
@@ -276,12 +275,12 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
* time. Also masking should be done before updating the configuration
* and unmasking afterwards.
*/
- icl_pcode_restrict_qgv_points(i915, new_mask);
+ icl_pcode_restrict_qgv_points(display, new_mask);
}
static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state =
intel_atomic_get_old_bw_state(state);
const struct intel_bw_state *new_bw_state =
@@ -299,7 +298,7 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_bw_state->base.changed);
- drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
old_mask, new_mask);
/*
@@ -308,12 +307,12 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
* time. Also masking should be done before updating the configuration
* and unmasking afterwards.
*/
- icl_pcode_restrict_qgv_points(i915, new_mask);
+ icl_pcode_restrict_qgv_points(display, new_mask);
}
void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
/*
* Just return if we can't control SAGV or don't have it.
@@ -322,10 +321,10 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
 * disabled in the BIOS, we are not even allowed to send a PCode request,
 * as it will throw an error. So we have to check it here.
*/
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_sagv_pre_plane_update(state);
else
skl_sagv_pre_plane_update(state);
@@ -333,7 +332,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
/*
* Just return if we can't control SAGV or don't have it.
@@ -342,10 +341,10 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
 * disabled in the BIOS, we are not even allowed to send a PCode request,
 * as it will throw an error. So we have to check it here.
*/
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_sagv_post_plane_update(state);
else
skl_sagv_post_plane_update(state);
@@ -353,12 +352,12 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
int max_level = INT_MAX;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return false;
if (!crtc_state->hw.active)
@@ -377,7 +376,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = i915->display.wm.num_levels - 1;
+ for (level = display->wm.num_levels - 1;
!wm->wm[level].enable; --level)
{ }
@@ -423,104 +422,37 @@ static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
return true;
}
-static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (!i915->display.params.enable_sagv)
+ if (!display->params.enable_sagv)
return false;
- if (DISPLAY_VER(i915) >= 12)
+ /*
+ * SAGV is initially forced off because its current
+ * state can't be queried from pcode. Allow SAGV to
+ * be enabled upon the first real commit.
+ */
+ if (crtc_state->inherited)
+ return false;
+
+ if (DISPLAY_VER(display) >= 12)
return tgl_crtc_can_enable_sagv(crtc_state);
else
return skl_crtc_can_enable_sagv(crtc_state);
}
-bool intel_can_enable_sagv(struct drm_i915_private *i915,
+bool intel_can_enable_sagv(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
return false;
return bw_state->pipe_sagv_reject == 0;
}
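
A condensed, standalone restatement of the decision above (plain C types;
is_power_of_2() mirrors the kernel helper): SAGV is allowed only when no
pipe has vetoed it, and before display ver 11 only with at most one active
pipe.

#include <stdbool.h>
#include <stdint.h>

static bool is_power_of_2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static bool can_enable_sagv(int display_ver, uint32_t active_pipes,
			    uint32_t pipe_sagv_reject)
{
	if (display_ver < 11 && active_pipes && !is_power_of_2(active_pipes))
		return false;

	return pipe_sagv_reject == 0;
}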
-static int intel_compute_sagv_mask(struct intel_atomic_state *state)
-{
- struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- int ret;
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case).
- *
- * drm_atomic_check_only() gets upset if we pull more crtcs
- * into the state, so we have to calculate this based on the
- * individual intel_crtc_can_enable_sagv() rather than
- * the overall intel_can_enable_sagv(). Otherwise the
- * crtcs not included in the commit would not switch to the
- * SAGV watermarks when we are about to enable SAGV, and that
- * would lead to underruns. This does mean extra power draw
- * when only a subset of the crtcs are blocking SAGV as the
- * other crtcs can't be allowed to use the more optimal
- * normal (ie. non-SAGV) watermarks.
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
- DISPLAY_VER(i915) >= 12 &&
- intel_crtc_can_enable_sagv(new_crtc_state);
-
- if (intel_crtc_can_enable_sagv(new_crtc_state))
- new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
- else
- new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
- }
-
- if (!new_bw_state)
- return 0;
-
- new_bw_state->active_pipes =
- intel_calc_active_pipes(state, old_bw_state->active_pipes);
-
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- if (intel_can_enable_sagv(i915, new_bw_state) !=
- intel_can_enable_sagv(i915, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
u16 start, u16 end)
{
@@ -530,17 +462,17 @@ static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
return end;
}
-static int intel_dbuf_slice_size(struct drm_i915_private *i915)
+static int intel_dbuf_slice_size(struct intel_display *display)
{
- return DISPLAY_INFO(i915)->dbuf.size /
- hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask);
+ return DISPLAY_INFO(display)->dbuf.size /
+ hweight8(DISPLAY_INFO(display)->dbuf.slice_mask);
}
static void
-skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
+skl_ddb_entry_for_slices(struct intel_display *display, u8 slice_mask,
struct skl_ddb_entry *ddb)
{
- int slice_size = intel_dbuf_slice_size(i915);
+ int slice_size = intel_dbuf_slice_size(display);
if (!slice_mask) {
ddb->start = 0;
@@ -552,10 +484,10 @@ skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
ddb->end = fls(slice_mask) * slice_size;
WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size);
+ WARN_ON(ddb->end > DISPLAY_INFO(display)->dbuf.size);
}
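
A worked example of the slice arithmetic above, with hypothetical numbers
(a 2048-block DBuf split over 4 slices, so slice_size = 512):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* fls() stand-in for this standalone sketch; the kernel provides its own. */
static int fls32(unsigned int x)
{
	int r = 0;

	for (; x; x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int slice_mask = 0x6;	/* two middle slices */
	int slice_size = 2048 / 4;

	/* Prints "ddb 512 - 1536": ffs(0x6) = 2, fls(0x6) = 3. */
	printf("ddb %d - %d\n",
	       (ffs(slice_mask) - 1) * slice_size,
	       fls32(slice_mask) * slice_size);
	return 0;
}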
-static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+static unsigned int mbus_ddb_offset(struct intel_display *display, u8 slice_mask)
{
struct skl_ddb_entry ddb;
@@ -564,15 +496,15 @@ static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask
else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
slice_mask = BIT(DBUF_S3);
- skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+ skl_ddb_entry_for_slices(display, slice_mask, &ddb);
return ddb.start;
}
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
const struct skl_ddb_entry *entry)
{
- int slice_size = intel_dbuf_slice_size(i915);
+ int slice_size = intel_dbuf_slice_size(display);
enum dbuf_slice start_slice, end_slice;
u8 slice_mask = 0;
@@ -618,15 +550,14 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
unsigned int *weight_end,
unsigned int *weight_total)
{
- struct drm_i915_private *i915 =
- to_i915(dbuf_state->base.state->base.dev);
+ struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
enum pipe pipe;
*weight_start = 0;
*weight_end = 0;
*weight_total = 0;
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
int weight = dbuf_state->weight[pipe];
/*
@@ -652,7 +583,7 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
static int
skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
unsigned int weight_total, weight_start, weight_end;
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
@@ -674,8 +605,8 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
dbuf_slice_mask = new_dbuf_state->slices[pipe];
- skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
- mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
+ skl_ddb_entry_for_slices(display, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(display, dbuf_slice_mask);
ddb_range_size = skl_ddb_entry_size(&ddb_slices);
intel_crtc_dbuf_weights(new_dbuf_state, pipe,
@@ -709,7 +640,7 @@ out:
crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
crtc->base.base.id, crtc->base.name,
old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
@@ -734,10 +665,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
-static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
+static unsigned int skl_wm_latency(struct intel_display *display, int level,
const struct skl_wm_params *wp)
{
- unsigned int latency = i915->display.wm.skl_latency[level];
+ unsigned int latency = display->wm.skl_latency[level];
if (latency == 0)
return 0;
@@ -746,11 +677,11 @@ static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
- if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
- skl_watermark_ipc_enabled(i915))
+ if ((display->platform.kabylake || display->platform.coffeelake ||
+ display->platform.cometlake) && skl_watermark_ipc_enabled(display))
latency += 4;
- if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
+ if (skl_needs_memory_bw_wa(display) && wp && wp->x_tiled)
latency += 15;
return latency;
@@ -760,8 +691,8 @@ static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
@@ -772,10 +703,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0, 0);
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
- for (level = 0; level < i915->display.wm.num_levels; level++) {
- unsigned int latency = skl_wm_latency(i915, level, &wp);
+ for (level = 0; level < display->wm.num_levels; level++) {
+ unsigned int latency = skl_wm_latency(display, level, &wp);
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
@@ -797,14 +728,13 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
}
static void
-skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
+skl_ddb_get_hw_plane_state(struct intel_display *display,
const enum pipe pipe,
const enum plane_id plane_id,
struct skl_ddb_entry *ddb,
struct skl_ddb_entry *ddb_y,
u16 *min_ddb, u16 *interim_ddb)
{
- struct intel_display *display = &i915->display;
u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
@@ -837,7 +767,6 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
u16 *min_ddb, u16 *interim_ddb)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
@@ -849,7 +778,7 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
return;
for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(i915, pipe,
+ skl_ddb_get_hw_plane_state(display, pipe,
plane_id,
&ddb[plane_id],
&ddb_y[plane_id],
@@ -1367,16 +1296,16 @@ static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbu
static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (IS_DG2(i915))
+ if (display->platform.dg2)
return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) == 12)
+ else if (DISPLAY_VER(display) == 12)
return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) == 11)
+ else if (DISPLAY_VER(display) == 11)
return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
/*
 * For anything else just return one slice for now.
@@ -1416,8 +1345,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
u64 data_rate = 0;
@@ -1427,7 +1356,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
data_rate += crtc_state->rel_data_rate[plane_id];
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->rel_data_rate_y[plane_id];
}
@@ -1489,7 +1418,7 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
-static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
+static bool skl_need_wm_copy_wa(struct intel_display *display, int level,
const struct skl_plane_wm *wm)
{
/*
@@ -1543,7 +1472,6 @@ static int
skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_dbuf_state *dbuf_state =
@@ -1585,7 +1513,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* Find the highest watermark level for which we can satisfy the block
* requirement of active planes.
*/
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = display->wm.num_levels - 1; level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
@@ -1596,7 +1524,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
@@ -1615,9 +1543,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
}
if (level < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Requested display configuration exceeds system DDB limitations");
- drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
+ drm_dbg_kms(display->drm, "minimum required %d/%d\n",
blocks, iter.size);
return -EINVAL;
}
@@ -1645,7 +1573,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
if (plane_id == PLANE_CURSOR)
continue;
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
crtc_state->rel_data_rate_y[plane_id]);
@@ -1661,7 +1589,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
*interim_ddb = wm->sagv.wm0.min_ddb_alloc;
}
}
- drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
+ drm_WARN_ON(display->drm, iter.size != 0 || iter.data_rate != 0);
/*
* When we calculated watermark values we didn't know how high
@@ -1669,7 +1597,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* all levels as "enabled." Go back now and disable the ones
* that aren't actually possible.
*/
- for (level++; level < i915->display.wm.num_levels; level++) {
+ for (level++; level < display->wm.num_levels; level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1678,7 +1606,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id))
skl_check_nv12_wm_level(&wm->wm[level],
&wm->uv_wm[level],
@@ -1686,7 +1614,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
else
skl_check_wm_level(&wm->wm[level], ddb);
- if (skl_need_wm_copy_wa(i915, level, wm)) {
+ if (skl_need_wm_copy_wa(display, level, wm)) {
wm->wm[level].blocks = wm->wm[level - 1].blocks;
wm->wm[level].lines = wm->wm[level - 1].lines;
wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
@@ -1708,7 +1636,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_check_wm_level(&wm->trans_wm, ddb_y);
} else {
@@ -1734,7 +1662,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
+skl_wm_method1(struct intel_display *display, u32 pixel_rate,
u8 cpp, u32 latency, u32 dbuf_block_size)
{
u32 wm_intermediate_val;
@@ -1746,7 +1674,7 @@ skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
wm_intermediate_val = latency * pixel_rate * cpp;
ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
ret = add_fixed16_u32(ret, 1);
return ret;
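
To put numbers to the method-1 formula above (all values hypothetical; the
driver does this in 16.16 fixed point): a 5 us latency at a 148500 kHz pixel
rate with 4 bytes per pixel and 512-byte DBuf blocks needs about
5 * 148500 * 4 / (1000 * 512) = 5.8 blocks, plus one extra block on ver 10+.

#include <stdio.h>

int main(void)
{
	unsigned long long latency = 5;		/* us */
	unsigned long long pixel_rate = 148500;	/* kHz */
	unsigned long long cpp = 4;		/* bytes per pixel */
	unsigned long long block = 512;		/* dbuf_block_size */
	unsigned long long num = latency * pixel_rate * cpp;
	unsigned long long den = 1000 * block;

	/* Prints "method1 ~= 5.8 blocks". */
	printf("method1 ~= %llu.%llu blocks\n",
	       num / den, (num % den) * 10 / den);
	return 0;
}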
@@ -1772,7 +1700,7 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
@@ -1782,7 +1710,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
pixel_rate = crtc_state->pixel_rate;
- if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
+ if (drm_WARN_ON(display->drm, pixel_rate == 0))
return u32_to_fixed16(0);
crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
@@ -1798,15 +1726,13 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane, unsigned int pan_x)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 interm_pbpl;
/* only planar format has two planes */
if (color_plane == 1 &&
!intel_format_info_is_yuv_semiplanar(format, modifier)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Non planar format have single plane\n");
return -EINVAL;
}
@@ -1824,7 +1750,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->cpp = format->cpp[color_plane];
wp->plane_pixel_rate = plane_pixel_rate;
- if (DISPLAY_VER(i915) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
@@ -1849,7 +1775,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines = 4;
}
- if (skl_needs_memory_bw_wa(i915))
+ if (skl_needs_memory_bw_wa(display))
wp->y_min_scanlines *= 2;
wp->plane_bytes_per_line = wp->width * wp->cpp;
@@ -1860,7 +1786,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(display) >= 30)
interm_pbpl += (pan_x != 0);
- else if (DISPLAY_VER(i915) >= 10)
+ else if (DISPLAY_VER(display) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
@@ -1869,7 +1795,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
wp->dbuf_block_size);
- if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
+ if (!wp->x_tiled || DISPLAY_VER(display) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -1906,18 +1832,18 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
plane_state->uapi.src.x1);
}
-static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
+static bool skl_wm_has_lines(struct intel_display *display, int level)
{
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
return true;
 /* The number of lines is ignored for the level 0 watermark. */
return level > 0;
}
-static int skl_wm_max_lines(struct drm_i915_private *i915)
+static int skl_wm_max_lines(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 255;
else
return 31;
@@ -1938,7 +1864,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
u32 blocks, lines, min_ddb_alloc = 0;
@@ -1950,7 +1876,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
return;
}
- method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
+ method1 = skl_wm_method1(display, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
crtc_state->hw.pipe_mode.crtc_htotal,
@@ -1965,7 +1891,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
} else if (latency >= wp->linetime_us) {
- if (DISPLAY_VER(i915) == 9)
+ if (DISPLAY_VER(display) == 9)
selected_result = min_fixed16(method1, method2);
else
selected_result = method2;
@@ -1975,7 +1901,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
blocks = fixed16_to_u32_round_up(selected_result);
- if (DISPLAY_VER(i915) < 30)
+ if (DISPLAY_VER(display) < 30)
blocks++;
/*
@@ -1994,13 +1920,13 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* channels' impact on the level 0 memory latency and the relevant
* wm calculations.
*/
- if (skl_wm_has_lines(i915, level))
+ if (skl_wm_has_lines(display, level))
blocks = max(blocks,
fixed16_to_u32_round_up(wp->plane_blocks_per_line));
lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
- if (DISPLAY_VER(i915) == 9) {
+ if (DISPLAY_VER(display) == 9) {
/* Display WA #1125: skl,bxt,kbl */
if (level == 0 && wp->rc_surface)
blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
@@ -2025,7 +1951,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- if (DISPLAY_VER(i915) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (wp->y_tiled) {
int extra_lines;
@@ -2042,10 +1968,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- if (!skl_wm_has_lines(i915, level))
+ if (!skl_wm_has_lines(display, level))
lines = 0;
- if (lines > skl_wm_max_lines(i915)) {
+ if (lines > skl_wm_max_lines(display)) {
/* reject it */
result->min_ddb_alloc = U16_MAX;
return;
@@ -2064,8 +1990,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
result->enable = true;
result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
- if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
- result->can_sagv = latency >= i915->display.sagv.block_time_us;
+ if (DISPLAY_VER(display) < 12 && display->sagv.block_time_us)
+ result->can_sagv = latency >= display->sagv.block_time_us;
}
static void
@@ -2074,13 +2000,13 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_wm_level *result_prev = &levels[0];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct skl_wm_level *result = &levels[level];
- unsigned int latency = skl_wm_latency(i915, level, wm_params);
+ unsigned int latency = skl_wm_latency(display, level, wm_params);
skl_compute_plane_wm(crtc_state, plane, level, latency,
wm_params, result_prev, result);
@@ -2094,21 +2020,21 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_plane_wm *plane_wm)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
struct skl_wm_level *levels = plane_wm->wm;
unsigned int latency = 0;
- if (i915->display.sagv.block_time_us)
- latency = i915->display.sagv.block_time_us +
- skl_wm_latency(i915, 0, wm_params);
+ if (display->sagv.block_time_us)
+ latency = display->sagv.block_time_us +
+ skl_wm_latency(display, 0, wm_params);
skl_compute_plane_wm(crtc_state, plane, 0, latency,
wm_params, &levels[0],
sagv_wm);
}
-static void skl_compute_transition_wm(struct drm_i915_private *i915,
+static void skl_compute_transition_wm(struct intel_display *display,
struct skl_wm_level *trans_wm,
const struct skl_wm_level *wm0,
const struct skl_wm_params *wp)
@@ -2117,23 +2043,23 @@ static void skl_compute_transition_wm(struct drm_i915_private *i915,
u16 wm0_blocks, trans_offset, blocks;
 /* Transition WM doesn't make any sense if IPC is disabled */
- if (!skl_watermark_ipc_enabled(i915))
+ if (!skl_watermark_ipc_enabled(display))
return;
/*
* WaDisableTWM:skl,kbl,cfl,bxt
 * Transition WMs are not recommended by the HW team for GEN9
*/
- if (DISPLAY_VER(i915) == 9)
+ if (DISPLAY_VER(display) == 9)
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
trans_min = 4;
else
trans_min = 14;
/* Display WA #1140: glk,cnl */
- if (DISPLAY_VER(i915) == 10)
+ if (DISPLAY_VER(display) == 10)
trans_amount = 0;
else
 trans_amount = 10; /* This is a configurable amount */
@@ -2175,8 +2101,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct intel_plane *plane, int color_plane)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
struct skl_wm_params wm_params;
int ret;
@@ -2188,13 +2113,13 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
- skl_compute_transition_wm(i915, &wm->trans_wm,
+ skl_compute_transition_wm(display, &wm->trans_wm,
&wm->wm[0], &wm_params);
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
- skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
+ skl_compute_transition_wm(display, &wm->sagv.trans_wm,
&wm->sagv.wm0, &wm_params);
}
@@ -2254,8 +2179,8 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
int ret;
@@ -2269,9 +2194,9 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!intel_wm_plane_visible(crtc_state, plane_state));
- drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
+ drm_WARN_ON(display->drm, !fb->format->is_yuv ||
fb->format->num_planes == 1);
ret = skl_build_plane_wm_single(crtc_state, plane_state,
@@ -2411,15 +2336,14 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
int wm0_lines)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
int level;
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = display->wm.num_levels - 1; level >= 0; level--) {
int latency;
/* FIXME should we care about the latency w/a's? */
- latency = skl_wm_latency(i915, level, NULL);
+ latency = skl_wm_latency(display, level, NULL);
if (latency == 0)
continue;
@@ -2436,8 +2360,8 @@ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int wm0_lines, level;
if (!crtc_state->hw.active)
@@ -2453,9 +2377,9 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
* PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
* based on whether we're limited by the vblank duration.
*/
- crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1;
+ crtc_state->wm_level_disabled = level < display->wm.num_levels - 1;
- for (level++; level < i915->display.wm.num_levels; level++) {
+ for (level++; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -2471,10 +2395,10 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
}
}
- if (DISPLAY_VER(i915) >= 12 &&
- i915->display.sagv.block_time_us &&
+ if (DISPLAY_VER(display) >= 12 &&
+ display->sagv.block_time_us &&
skl_is_vblank_too_short(crtc_state, wm0_lines,
- i915->display.sagv.block_time_us)) {
+ display->sagv.block_time_us)) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -2492,7 +2416,7 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
static int skl_build_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *plane_state;
@@ -2508,7 +2432,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
if (plane->pipe != crtc->pipe)
continue;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
ret = icl_build_plane_wm(crtc_state, plane_state);
else
ret = skl_build_plane_wm(crtc_state, plane_state);
@@ -2531,11 +2455,10 @@ static bool skl_wm_level_equals(const struct skl_wm_level *l1,
l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable;
}
-static bool skl_plane_wm_equals(struct drm_i915_private *i915,
+static bool skl_plane_wm_equals(struct intel_display *display,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
- struct intel_display *display = &i915->display;
int level;
for (level = 0; level < display->wm.num_levels; level++) {
@@ -2590,14 +2513,14 @@ static int
skl_ddb_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
@@ -2608,7 +2531,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state,
continue;
if (new_crtc_state->do_async_flip) {
- drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -2627,7 +2550,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state,
static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
+ struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
u8 enabled_slices;
enum pipe pipe;
@@ -2637,7 +2560,7 @@ static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
*/
enabled_slices = BIT(DBUF_S1);
- for_each_pipe(i915, pipe)
+ for_each_pipe(display, pipe)
enabled_slices |= dbuf_state->slices[pipe];
return enabled_slices;
@@ -2647,7 +2570,6 @@ static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
struct intel_dbuf_state *new_dbuf_state = NULL;
struct intel_crtc_state *new_crtc_state;
@@ -2686,7 +2608,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
}
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
new_dbuf_state->slices[pipe] =
@@ -2709,11 +2631,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
if (ret)
return ret;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
- DISPLAY_INFO(i915)->dbuf.slice_mask,
+ DISPLAY_INFO(display)->dbuf.slice_mask,
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus));
}
@@ -2731,7 +2653,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
ret = skl_crtc_allocate_ddb(state, crtc);
if (ret)
return ret;
@@ -2758,7 +2680,7 @@ static char enast(bool enable)
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state;
const struct intel_crtc_state *new_crtc_state;
struct intel_plane *plane;
@@ -2775,7 +2697,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
old_pipe_wm = &old_crtc_state->wm.skl.optimal;
new_pipe_wm = &new_crtc_state->wm.skl.optimal;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
@@ -2785,24 +2707,24 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
plane->base.base.id, plane->base.name,
old->start, old->end, new->start, new->end,
skl_ddb_entry_size(old), skl_ddb_entry_size(new));
}
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_plane_wm *old_wm, *new_wm;
old_wm = &old_pipe_wm->planes[plane_id];
new_wm = &new_pipe_wm->planes[plane_id];
- if (skl_plane_wm_equals(i915, old_wm, new_wm))
+ if (skl_plane_wm_equals(display, old_wm, new_wm))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
" -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
plane->base.base.id, plane->base.name,
@@ -2821,7 +2743,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->sagv.wm0.enable),
enast(new_wm->sagv.trans_wm.enable));
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
" -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
plane->base.base.id, plane->base.name,
@@ -2848,7 +2770,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
@@ -2867,7 +2789,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
new_wm->sagv.wm0.blocks,
new_wm->sagv.trans_wm.blocks);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
@@ -2945,14 +2867,14 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
@@ -2971,7 +2893,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
continue;
if (new_crtc_state->do_async_flip) {
- drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -3002,7 +2924,6 @@ void
intel_program_dpkgc_latency(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
struct intel_crtc_state *new_crtc_state;
u32 latency = LNL_PKG_C_LATENCY_MASK;
@@ -3028,7 +2949,7 @@ intel_program_dpkgc_latency(struct intel_atomic_state *state)
added_wake_time = DSB_EXE_TIME +
display->sagv.block_time_us;
- latency = skl_watermark_max_latency(i915, 1);
+ latency = skl_watermark_max_latency(display, 1);
/* Wa_22020432604 */
if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
@@ -3055,6 +2976,7 @@ intel_program_dpkgc_latency(struct intel_atomic_state *state)
static int
skl_compute_wm(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
@@ -3069,16 +2991,35 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = intel_compute_sagv_mask(state);
- if (ret)
- return ret;
-
/*
* skl_compute_ddb() will have adjusted the final watermarks
* based on how much ddb is available. Now we can actually
* check if the final watermarks changed.
*/
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case).
+ *
+ * drm_atomic_check_only() gets upset if we pull more crtcs
+ * into the state, so we have to calculate this based on the
+ * individual intel_crtc_can_enable_sagv() rather than
+ * the overall intel_can_enable_sagv(). Otherwise the
+ * crtcs not included in the commit would not switch to the
+ * SAGV watermarks when we are about to enable SAGV, and that
+ * would lead to underruns. This does mean extra power draw
+ * when only a subset of the crtcs are blocking SAGV as the
+ * other crtcs can't be allowed to use the more optimal
+ * normal (ie. non-SAGV) watermarks.
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
+ DISPLAY_VER(display) >= 12 &&
+ intel_crtc_can_enable_sagv(new_crtc_state);
+
ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
@@ -3149,11 +3090,10 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
}
}
-static void skl_wm_get_hw_state(struct drm_i915_private *i915)
+static void skl_wm_get_hw_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ to_intel_dbuf_state(display->dbuf.obj.state);
struct intel_crtc *crtc;
if (HAS_MBUS_JOINING(display))
@@ -3193,7 +3133,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
if (!crtc_state->hw.active)
continue;
- skl_ddb_get_hw_plane_state(i915, crtc->pipe,
+ skl_ddb_get_hw_plane_state(display, crtc->pipe,
plane_id, ddb, ddb_y,
min_ddb, interim_ddb);
@@ -3209,13 +3149,13 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
*/
slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
dbuf_state->joined_mbus);
- mbus_offset = mbus_ddb_offset(i915, slices);
+ mbus_offset = mbus_ddb_offset(display, slices);
crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
/* The slices actually used by the planes on the pipe */
dbuf_state->slices[pipe] =
- skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
+ skl_ddb_dbuf_slice_mask(display, &crtc_state->wm.skl.ddb);
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
@@ -3228,49 +3168,52 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->enabled_slices = display->dbuf.enabled_slices;
}
-bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
+bool skl_watermark_ipc_enabled(struct intel_display *display)
{
- return i915->display.wm.ipc_enabled;
+ return display->wm.ipc_enabled;
}
-void skl_watermark_ipc_update(struct drm_i915_private *i915)
+void skl_watermark_ipc_update(struct intel_display *display)
{
- if (!HAS_IPC(i915))
+ if (!HAS_IPC(display))
return;
- intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
- skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(display) ? DISP_IPC_ENABLE : 0);
}
-static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
+static bool skl_watermark_ipc_can_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
/* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(i915))
+ if (display->platform.skylake)
return false;
/* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915))
+ if (display->platform.kabylake ||
+ display->platform.coffeelake ||
+ display->platform.cometlake)
return i915->dram_info.symmetric_memory;
return true;
}
-void skl_watermark_ipc_init(struct drm_i915_private *i915)
+void skl_watermark_ipc_init(struct intel_display *display)
{
- if (!HAS_IPC(i915))
+ if (!HAS_IPC(display))
return;
- i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
+ display->wm.ipc_enabled = skl_watermark_ipc_can_enable(display);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
}
static void
-adjust_wm_latency(struct drm_i915_private *i915,
+adjust_wm_latency(struct intel_display *display,
u16 wm[], int num_levels, int read_latency)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
int i, level;
@@ -3311,31 +3254,32 @@ adjust_wm_latency(struct drm_i915_private *i915,
wm[0] += 1;
}
-static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- int num_levels = i915->display.wm.num_levels;
+ int num_levels = display->wm.num_levels;
u32 val;
- val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
+ val = intel_de_read(display, MTL_LATENCY_LP0_LP1);
wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
+ val = intel_de_read(display, MTL_LATENCY_LP2_LP3);
wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
+ val = intel_de_read(display, MTL_LATENCY_LP4_LP5);
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- adjust_wm_latency(i915, wm, num_levels, 6);
+ adjust_wm_latency(display, wm, num_levels, 6);
}
-static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- int num_levels = i915->display.wm.num_levels;
- int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
- int mult = IS_DG2(i915) ? 2 : 1;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ int num_levels = display->wm.num_levels;
+ int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2;
+ int mult = display->platform.dg2 ? 2 : 1;
u32 val;
int ret;
@@ -3343,7 +3287,7 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
val = 0; /* data0 to be programmed to 0 for first set */
ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
- drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -3356,7 +3300,7 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
val = 1; /* data0 to be programmed to 1 for second set */
ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
- drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -3365,24 +3309,22 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
- adjust_wm_latency(i915, wm, num_levels, read_latency);
+ adjust_wm_latency(display, wm, num_levels, read_latency);
}
-static void skl_setup_wm_latency(struct drm_i915_private *i915)
+static void skl_setup_wm_latency(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (HAS_HW_SAGV_WM(display))
display->wm.num_levels = 6;
else
display->wm.num_levels = 8;
if (DISPLAY_VER(display) >= 14)
- mtl_read_wm_latency(i915, display->wm.skl_latency);
+ mtl_read_wm_latency(display, display->wm.skl_latency);
else
- skl_read_wm_latency(i915, display->wm.skl_latency);
+ skl_read_wm_latency(display, display->wm.skl_latency);
- intel_print_wm_latency(i915, "Gen9 Plane", display->wm.skl_latency);
+ intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
@@ -3410,19 +3352,18 @@ static const struct intel_global_state_funcs intel_dbuf_funcs = {
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *dbuf_state;
- dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
+ dbuf_state = intel_atomic_get_global_obj_state(state, &display->dbuf.obj);
if (IS_ERR(dbuf_state))
return ERR_CAST(dbuf_state);
return to_intel_dbuf_state(dbuf_state);
}
-int intel_dbuf_init(struct drm_i915_private *i915)
+int intel_dbuf_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state;
dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
@@ -3457,34 +3398,34 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 val = 0;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
val |= MBUS_DBOX_I_CREDIT(2);
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
}
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
/* Wa_22010947358:adl-p */
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
else
val |= MBUS_DBOX_A_CREDIT(2);
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
val |= MBUS_DBOX_B_CREDIT(0xA);
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(8);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(12);
} else {
@@ -3492,7 +3433,7 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
val |= MBUS_DBOX_B_CREDIT(8);
}
- if (DISPLAY_VERx100(i915) == 1400) {
+ if (DISPLAY_VERx100(display) == 1400) {
if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes))
val |= MBUS_DBOX_BW_8CREDITS_MTL;
else
@@ -3502,22 +3443,22 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
return val;
}
-static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915,
+static void pipe_mbus_dbox_ctl_update(struct intel_display *display,
const struct intel_dbuf_state *dbuf_state)
{
struct intel_crtc *crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes)
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe),
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, dbuf_state->active_pipes)
+ intel_de_write(display, PIPE_MBUS_DBOX_CTL(crtc->pipe),
pipe_mbus_dbox_ctl(crtc, dbuf_state));
}
static void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return;
new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
@@ -3527,7 +3468,7 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
return;
- pipe_mbus_dbox_ctl_update(i915, new_dbuf_state);
+ pipe_mbus_dbox_ctl_update(display, new_dbuf_state);
}
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -3544,10 +3485,9 @@ int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
return intel_atomic_lock_global_state(&dbuf_state->base);
}
-void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
int ratio, bool joined_mbus)
{
- struct intel_display *display = &i915->display;
enum dbuf_slice slice;
if (!HAS_MBUS_JOINING(display))
@@ -3571,7 +3511,7 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
const struct intel_dbuf_state *new_dbuf_state =
@@ -3586,7 +3526,7 @@ static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state
mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
}
- intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
+ intel_dbuf_mdclk_cdclk_ratio_update(display, mdclk_cdclk_ratio,
new_dbuf_state->joined_mbus);
}
@@ -3594,13 +3534,12 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
const struct intel_dbuf_state *dbuf_state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
const struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
- drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
+ drm_WARN_ON(display->drm, !dbuf_state->joined_mbus);
+ drm_WARN_ON(display->drm, !is_power_of_2(dbuf_state->active_pipes));
crtc = intel_crtc_for_pipe(display, pipe);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -3611,7 +3550,7 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
return INVALID_PIPE;
}
-static void mbus_ctl_join_update(struct drm_i915_private *i915,
+static void mbus_ctl_join_update(struct intel_display *display,
const struct intel_dbuf_state *dbuf_state,
enum pipe pipe)
{
@@ -3627,7 +3566,7 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915,
else
mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
- intel_de_rmw(i915, MBUS_CTL,
+ intel_de_rmw(display, MBUS_CTL,
MBUS_HASHING_MODE_MASK | MBUS_JOIN |
MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
}
@@ -3635,18 +3574,18 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915,
static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
const struct intel_dbuf_state *new_dbuf_state =
intel_atomic_get_new_dbuf_state(state);
- drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ drm_dbg_kms(display->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus),
pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
- mbus_ctl_join_update(i915, new_dbuf_state, pipe);
+ mbus_ctl_join_update(display, new_dbuf_state, pipe);
}
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
@@ -3751,9 +3690,8 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(display, new_slices);
}
-static void skl_mbus_sanitize(struct drm_i915_private *i915)
+static void skl_mbus_sanitize(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(display->dbuf.obj.state);
@@ -3768,28 +3706,28 @@ static void skl_mbus_sanitize(struct drm_i915_private *i915)
dbuf_state->active_pipes);
dbuf_state->joined_mbus = false;
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
dbuf_state->mdclk_cdclk_ratio,
dbuf_state->joined_mbus);
- pipe_mbus_dbox_ctl_update(i915, dbuf_state);
- mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE);
+ pipe_mbus_dbox_ctl_update(display, dbuf_state);
+ mbus_ctl_join_update(display, dbuf_state, INVALID_PIPE);
}
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+static bool skl_dbuf_is_misconfigured(struct intel_display *display)
{
const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ to_intel_dbuf_state(display->dbuf.obj.state);
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
entries[crtc->pipe] = crtc_state->wm.skl.ddb;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
u8 slices;
@@ -3807,7 +3745,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
return false;
}
-static void skl_dbuf_sanitize(struct drm_i915_private *i915)
+static void skl_dbuf_sanitize(struct intel_display *display)
{
struct intel_crtc *crtc;
@@ -3822,12 +3760,12 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
* all the planes so that skl_commit_modeset_enables() can
* simply ignore them.
*/
- if (!skl_dbuf_is_misconfigured(i915))
+ if (!skl_dbuf_is_misconfigured(display))
return;
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+ drm_dbg_kms(display->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -3837,16 +3775,16 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
if (plane_state->uapi.visible)
intel_plane_disable_noatomic(crtc, plane);
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+ drm_WARN_ON(display->drm, crtc_state->active_planes != 0);
memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
}
}
-static void skl_wm_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct intel_display *display)
{
- skl_mbus_sanitize(i915);
- skl_dbuf_sanitize(i915);
+ skl_mbus_sanitize(display);
+ skl_dbuf_sanitize(display);
}
void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3897,7 +3835,6 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct skl_hw_state {
@@ -3912,7 +3849,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
u8 hw_enabled_slices;
int level;
- if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ if (DISPLAY_VER(display) < 9 || !new_crtc_state->hw.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
@@ -3925,26 +3862,26 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
hw_enabled_slices = intel_enabled_dbuf_slices_mask(display);
- if (DISPLAY_VER(i915) >= 11 &&
- hw_enabled_slices != i915->display.dbuf.enabled_slices)
- drm_err(&i915->drm,
+ if (DISPLAY_VER(display) >= 11 &&
+ hw_enabled_slices != display->dbuf.enabled_slices)
+ drm_err(display->drm,
"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- i915->display.dbuf.enabled_slices,
+ display->dbuf.enabled_slices,
hw_enabled_slices);
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
hw_wm_level = &hw->wm.planes[plane->id].wm[level];
sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
continue;
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name, level,
sw_wm_level->enable,
@@ -3959,7 +3896,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -3975,7 +3912,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
if (HAS_HW_SAGV_WM(display) &&
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -3991,7 +3928,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
if (HAS_HW_SAGV_WM(display) &&
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -4007,7 +3944,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
plane->base.base.id, plane->base.name,
sw_ddb_entry->start, sw_ddb_entry->end,
@@ -4024,29 +3961,29 @@ static const struct intel_wm_funcs skl_wm_funcs = {
.sanitize = skl_wm_sanitize,
};
-void skl_wm_init(struct drm_i915_private *i915)
+void skl_wm_init(struct intel_display *display)
{
- intel_sagv_init(i915);
+ intel_sagv_init(display);
- skl_setup_wm_latency(i915);
+ skl_setup_wm_latency(display);
- i915->display.funcs.wm = &skl_wm_funcs;
+ display->funcs.wm = &skl_wm_funcs;
}
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
seq_printf(m, "Isochronous Priority Control: %s\n",
- str_yes_no(skl_watermark_ipc_enabled(i915)));
+ str_yes_no(skl_watermark_ipc_enabled(display)));
return 0;
}
static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *i915 = inode->i_private;
+ struct intel_display *display = inode->i_private;
- return single_open(file, skl_watermark_ipc_status_show, i915);
+ return single_open(file, skl_watermark_ipc_status_show, display);
}
static ssize_t skl_watermark_ipc_status_write(struct file *file,
@@ -4054,8 +3991,7 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *i915 = m->private;
- intel_wakeref_t wakeref;
+ struct intel_display *display = m->private;
bool enable;
int ret;
@@ -4063,12 +3999,12 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file,
if (ret < 0)
return ret;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- if (!skl_watermark_ipc_enabled(i915) && enable)
- drm_info(&i915->drm,
+ with_intel_display_rpm(display) {
+ if (!skl_watermark_ipc_enabled(display) && enable)
+ drm_info(display->drm,
"Enabling IPC: WM will be proper only after next commit\n");
- i915->display.wm.ipc_enabled = enable;
- skl_watermark_ipc_update(i915);
+ display->wm.ipc_enabled = enable;
+ skl_watermark_ipc_update(display);
}
return len;
@@ -4085,7 +4021,7 @@ static const struct file_operations skl_watermark_ipc_status_fops = {
static int intel_sagv_status_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
static const char * const sagv_status[] = {
[I915_SAGV_UNKNOWN] = "unknown",
[I915_SAGV_DISABLED] = "disabled",
@@ -4093,37 +4029,36 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused)
[I915_SAGV_NOT_CONTROLLED] = "not controlled",
};
- seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
+ seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(display)));
seq_printf(m, "SAGV modparam: %s\n",
- str_enabled_disabled(i915->display.params.enable_sagv));
- seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
- seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
+ str_enabled_disabled(display->params.enable_sagv));
+ seq_printf(m, "SAGV status: %s\n", sagv_status[display->sagv.status]);
+ seq_printf(m, "SAGV block time: %d usec\n", display->sagv.block_time_us);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
-void skl_watermark_debugfs_register(struct drm_i915_private *i915)
+void skl_watermark_debugfs_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct drm_minor *minor = display->drm->primary;
if (HAS_IPC(display))
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
- &skl_watermark_ipc_status_fops);
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root,
+ display, &skl_watermark_ipc_status_fops);
if (HAS_SAGV(display))
- debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
- &intel_sagv_status_fops);
+ debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root,
+ display, &intel_sagv_status_fops);
}
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
+unsigned int skl_watermark_max_latency(struct intel_display *display, int initial_wm_level)
{
int level;
- for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
- unsigned int latency = skl_wm_latency(i915, level, NULL);
+ for (level = display->wm.num_levels - 1; level >= initial_wm_level; level--) {
+ unsigned int latency = skl_wm_latency(display, level, NULL);
if (latency)
return latency;
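
Every hunk in skl_watermark.c above applies the same mechanical conversion: struct intel_display replaces struct drm_i915_private as the device context, and drm_i915_private is re-derived from the display pointer only where a not-yet-converted interface (pcode/uncore, dram_info) still needs it. A minimal sketch of the pattern, using only helpers visible in the hunks; example_update() itself is hypothetical and not part of the patch:

static void example_update(struct intel_atomic_state *state)
{
	/* Derive the display context from whatever object is at hand. */
	struct intel_display *display = to_intel_display(state);
	/* Reach back to i915 only for not-yet-converted interfaces. */
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (DISPLAY_VER(display) >= 12)		/* was DISPLAY_VER(i915) */
		drm_dbg_kms(display->drm, "...\n");	/* was &i915->drm */

	if (display->platform.dg2)		/* was IS_DG2(i915) */
		drm_dbg_kms(display->drm, "%d\n",
			    i915->dram_info.wm_lv_0_adjust_needed);
}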
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index d9cff6c54310..95b0b599d5c3 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -12,7 +12,6 @@
#include "intel_global_state.h"
#include "intel_wm_types.h"
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_bw_state;
struct intel_crtc;
@@ -27,11 +26,12 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
-bool intel_can_enable_sagv(struct drm_i915_private *i915,
+bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state);
+bool intel_can_enable_sagv(struct intel_display *display,
const struct intel_bw_state *bw_state);
-bool intel_has_sagv(struct drm_i915_private *i915);
+bool intel_has_sagv(struct intel_display *display);
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
const struct skl_ddb_entry *entry);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
@@ -45,14 +45,14 @@ void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc);
void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane);
-void skl_watermark_ipc_init(struct drm_i915_private *i915);
-void skl_watermark_ipc_update(struct drm_i915_private *i915);
-bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
-void skl_watermark_debugfs_register(struct drm_i915_private *i915);
+void skl_watermark_ipc_init(struct intel_display *display);
+void skl_watermark_ipc_update(struct intel_display *display);
+bool skl_watermark_ipc_enabled(struct intel_display *display);
+void skl_watermark_debugfs_register(struct intel_display *display);
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915,
+unsigned int skl_watermark_max_latency(struct intel_display *display,
int initial_wm_level);
-void skl_wm_init(struct drm_i915_private *i915);
+void skl_wm_init(struct intel_display *display);
const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id,
@@ -86,13 +86,13 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
#define intel_atomic_get_new_dbuf_state(state) \
to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
-int intel_dbuf_init(struct drm_i915_private *i915);
+int intel_dbuf_init(struct intel_display *display);
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
int ratio);
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
int ratio, bool joined_mbus);
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
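
With the header converted, callers now pass struct intel_display directly. A hypothetical setup sequence against the new prototypes (a sketch only; the real call sites live in the display core code, outside this diff):

static int example_wm_setup(struct intel_display *display)
{
	int ret;

	ret = intel_dbuf_init(display);		/* was intel_dbuf_init(i915) */
	if (ret)
		return ret;

	skl_wm_init(display);			/* installs skl_wm_funcs */
	skl_watermark_ipc_init(display);	/* no-op unless HAS_IPC() */

	return 0;
}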
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index af717df83197..346737f15fa9 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -251,8 +251,10 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
return 0;
}
-static void band_gap_reset(struct drm_i915_private *dev_priv)
+static void band_gap_reset(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
vlv_flisdsi_get(dev_priv);
vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
@@ -269,13 +271,13 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -298,7 +300,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
else
pipe_config->pipe_bpp = 18;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
/* Enable Frame time stamp based scanline reporting */
pipe_config->mode_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
@@ -468,7 +470,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
vlv_flisdsi_put(dev_priv);
/* bandgap reset is needed after every power gate */
- band_gap_reset(dev_priv);
+ band_gap_reset(display);
for_each_dsi_port(port, intel_dsi->ports) {
@@ -495,11 +497,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
glk_dsi_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dsi_device_ready(encoder);
else
vlv_dsi_device_ready(encoder);
@@ -559,23 +561,22 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
glk_dsi_disable_mipi_io(encoder);
}
-static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
+static i915_reg_t port_ctrl_reg(struct intel_display *display, enum port port)
{
- return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
+ return display->platform.geminilake || display->platform.broxton ?
BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port);
}
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = display->platform.broxton ?
BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A);
intel_de_write(display, MIPI_DEVICE_READY(display, port),
@@ -594,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
- if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
+ if ((display->platform.broxton || port == PORT_A) &&
intel_de_wait_for_clear(display, port_ctrl,
AFE_LATCHOUT, 30))
drm_err(display->drm, "DSI LP not going Low\n");
@@ -612,7 +613,6 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -620,7 +620,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp = intel_dsi->pixel_overlap;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
for_each_dsi_port(port, intel_dsi->ports)
intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIXEL_OVERLAP_CNT_MASK,
@@ -633,7 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
u32 temp;
temp = intel_de_read(display, port_ctrl);
@@ -644,7 +644,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
- if (IS_BROXTON(dev_priv))
+ if (display->platform.broxton)
temp |= LANE_CONFIGURATION_DUAL_LINK_A;
else
temp |= crtc->pipe ?
@@ -664,12 +664,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
/* de-assert ip_tg_enable signal */
intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0);
@@ -730,7 +729,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
bool glk_cold_boot = false;
@@ -745,7 +743,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_pll_disable(encoder);
bxt_dsi_pll_enable(encoder, pipe_config);
} else {
@@ -753,7 +751,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
vlv_dsi_pll_enable(encoder, pipe_config);
}
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
/* Add MIPI IO reset programming for modeset */
intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
@@ -762,13 +760,13 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
- if (!IS_GEMINILAKE(dev_priv))
+ if (!display->platform.geminilake)
intel_dsi_prepare(encoder, pipe_config);
/* Give the panel time to power on and then deassert its reset */
@@ -776,7 +774,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
msleep(intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
glk_cold_boot = glk_dsi_enable_io(encoder);
/* Prepare port in cold boot(s3/s4) scenario */
@@ -788,7 +786,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_device_ready(encoder);
/* Prepare port in normal boot scenario */
- if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot)
+ if (display->platform.geminilake && !glk_cold_boot)
intel_dsi_prepare(encoder, pipe_config);
/* Send initialization commands in LP mode */
@@ -836,11 +834,11 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
@@ -860,9 +858,9 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
glk_dsi_clear_device_ready(encoder);
else
vlv_dsi_clear_device_ready(encoder);
@@ -874,13 +872,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
intel_crtc_vblank_off(old_crtc_state);
skl_scaler_disable(old_crtc_state);
@@ -907,7 +904,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
/* Transition to LP-00 */
intel_dsi_clear_device_ready(encoder);
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
/* Power down DSI regulator to save power */
intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL,
@@ -917,12 +914,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_pll_disable(encoder);
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
@@ -939,7 +936,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
@@ -957,13 +953,13 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- !bxt_dsi_pll_is_enabled(dev_priv))
+ if ((display->platform.geminilake || display->platform.broxton) &&
+ !bxt_dsi_pll_is_enabled(display))
goto out_put_power;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE;
/*
@@ -971,10 +967,10 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
port == PORT_C)
enabled = intel_de_read(display,
- TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE;
+ TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
@@ -989,7 +985,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY))
continue;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
@@ -1177,15 +1173,15 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 pclk;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = bxt_dsi_get_pclk(encoder, pipe_config);
} else {
@@ -1218,7 +1214,6 @@ static void set_dsi_timings(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1253,7 +1248,7 @@ static void set_dsi_timings(struct intel_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1307,7 +1302,6 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1327,7 +1321,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@@ -1342,7 +1336,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
intel_de_write(display, MIPI_CTRL(display, port),
tmp | READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
enum pipe pipe = crtc->pipe;
intel_de_rmw(display, MIPI_CTRL(display, port),
@@ -1377,7 +1371,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
tmp |= BXT_DPHY_DEFEATURE_EN;
if (!is_cmd_mode(intel_dsi))
tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
@@ -1424,7 +1418,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
intel_de_write(display, MIPI_INIT_COUNT(display, port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
!intel_dsi->dual_link) {
/*
* BXT spec says write MIPI_INIT_COUNT for
@@ -1461,7 +1455,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
intel_de_write(display, MIPI_LP_BYTECLK(display, port),
intel_dsi->lp_byte_clk);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port),
intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
@@ -1513,18 +1507,17 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
return;
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
@@ -1596,8 +1589,8 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct intel_connector *connector = intel_dsi->attached_connector;
+ struct intel_display *display = to_intel_display(connector);
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
u32 ui_num, ui_den;
@@ -1645,7 +1638,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* For GEMINILAKE, dphy_param_reg will be programmed in terms of
* the HS byte clock count; for other platforms, in terms of the HS DDR clock count
*/
- mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
+ mul = display->platform.geminilake ? 8 : 2;
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
@@ -1653,7 +1646,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
if (prepare_cnt > PREPARE_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n",
+ drm_dbg_kms(display->drm, "prepare count too high %u\n",
prepare_cnt);
prepare_cnt = PREPARE_CNT_MAX;
}
@@ -1674,7 +1667,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
exit_zero_cnt += 1;
if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n",
+ drm_dbg_kms(display->drm, "exit zero count too high %u\n",
exit_zero_cnt);
exit_zero_cnt = EXIT_ZERO_CNT_MAX;
}
@@ -1685,7 +1678,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* ui_den, ui_num * mul);
if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n",
+ drm_dbg_kms(display->drm, "clock zero count too high %u\n",
clk_zero_cnt);
clk_zero_cnt = CLK_ZERO_CNT_MAX;
}
@@ -1695,7 +1688,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
if (trail_cnt > TRAIL_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n",
+ drm_dbg_kms(display->drm, "trail count too high %u\n",
trail_cnt);
trail_cnt = TRAIL_CNT_MAX;
}
@@ -1761,7 +1754,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1770,7 +1763,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
* On Valleyview some DSI panels lose (v|h)sync when the clock is lower
* than 320000 kHz.
*/
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
return 320000;
/*
@@ -1778,7 +1771,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
* picture gets unstable, despite the values being
* correct for the DSI PLL and DE PLL.
*/
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
return 158400;
return 0;
@@ -1903,9 +1896,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
{ }
};
-void vlv_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -1914,16 +1906,16 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
enum port port;
enum pipe pipe;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* There is no detection method for MIPI so rely on VBT */
if (!intel_bios_is_dsi_present(display, &port))
return;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE;
+ if (display->platform.geminilake || display->platform.broxton)
+ display->dsi.mmio_base = BXT_MIPI_BASE;
else
- dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE;
+ display->dsi.mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
@@ -1938,12 +1930,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
encoder = &intel_dsi->base;
intel_dsi->attached_connector = connector;
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_dsi_funcs,
DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
encoder->compute_config = intel_dsi_compute_config;
encoder->pre_enable = intel_dsi_pre_enable;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
encoder->enable = bxt_dsi_enable;
encoder->disable = intel_dsi_disable;
encoder->post_disable = intel_dsi_post_disable;
@@ -1963,7 +1955,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
encoder->pipe_mask = ~0;
else if (port == PORT_A)
encoder->pipe_mask = BIT(PIPE_A);
@@ -1979,10 +1971,10 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
- if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
@@ -1998,18 +1990,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- drm_dbg_kms(&dev_priv->drm, "no device found\n");
+ drm_dbg_kms(display->drm, "no device found\n");
goto err;
}
/* Use clock read-back from current hw-state for fastboot */
current_mode = intel_encoder_current_mode(encoder);
if (current_mode) {
- drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
+ drm_dbg_kms(display->drm, "Calculated pclk %d GOP %d\n",
intel_dsi->pclk, current_mode->clock);
if (intel_fuzzy_clock_check(intel_dsi->pclk,
current_mode->clock)) {
- drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n");
+ drm_dbg_kms(display->drm, "Using GOP pclk\n");
intel_dsi->pclk = current_mode->clock;
}
@@ -2021,7 +2013,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi_vbt_gpio_init(intel_dsi,
intel_dsi_get_hw_state(encoder, &pipe));
- drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs,
+ drm_connector_init(display->drm, &connector->base, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs);
@@ -2030,12 +2022,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(connector, encoder);
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(connector);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!intel_panel_preferred_fixed_mode(connector)) {
- drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
+ drm_dbg_kms(display->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h
index 277bacfbc551..ff349b5876c2 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.h
@@ -7,14 +7,14 @@
#define __VLV_DSI_H__
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_dsi;
#ifdef I915
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state);
-void vlv_dsi_init(struct drm_i915_private *dev_priv);
+void vlv_dsi_init(struct intel_display *display);
#else
static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
@@ -23,7 +23,7 @@ static inline int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
return 0;
}
-static inline void vlv_dsi_init(struct drm_i915_private *dev_priv)
+static inline void vlv_dsi_init(struct intel_display *display)
{
}
#endif
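
The vlv_dsi changes above are one mechanical conversion, repeated per call site: derive a struct intel_display pointer from whatever context object is at hand, then use it for logging and platform checks in place of struct drm_i915_private. A minimal before/after sketch of that pattern, assuming only the i915 helpers already named in the hunks (to_intel_display(), display->platform.*, drm_dbg_kms()):

/* Sketch of the conversion pattern; not a literal hunk from this series. */
static void example_old(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (IS_GEMINILAKE(dev_priv))
		drm_dbg_kms(&dev_priv->drm, "geminilake\n");
}

static void example_new(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	if (display->platform.geminilake)
		drm_dbg_kms(display->drm, "geminilake\n");
}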
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 2ed47e7d1051..7ce924a5ef90 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -57,7 +57,7 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
return dsi_clk_khz;
}
-static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+static int dsi_calc_mnp(struct intel_display *display,
struct intel_crtc_state *config,
int target_dsi_clk)
{
@@ -68,11 +68,11 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
- drm_err(&dev_priv->drm, "DSI CLK Out of Range\n");
+ drm_err(display->drm, "DSI CLK Out of Range\n");
return -ECHRNG;
}
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
ref_clk = 100000;
n = 4;
m_min = 70;
@@ -116,13 +116,13 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
static int vlv_dsi_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 dsi_clock;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0, n;
- int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
+ int refclk = display->platform.cherryview ? 100000 : 25000;
int i;
pll_ctl = config->dsi_pll.ctrl;
@@ -147,7 +147,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
p--;
if (!p) {
- drm_err(&dev_priv->drm, "wrong P1 divisor\n");
+ drm_err(display->drm, "wrong P1 divisor\n");
return 0;
}
@@ -157,7 +157,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
}
if (i == ARRAY_SIZE(lfsr_converts)) {
- drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
+ drm_err(display->drm, "wrong m_seed programmed\n");
return 0;
}
@@ -175,16 +175,16 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int pclk, dsi_clk, ret;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
- ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
+ ret = dsi_calc_mnp(display, config, dsi_clk);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n");
+ drm_dbg_kms(display->drm, "dsi_calc_mnp failed\n");
return ret;
}
@@ -196,7 +196,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
- drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
+ drm_dbg_kms(display->drm, "dsi pll div %08x, ctrl %08x\n",
config->dsi_pll.div, config->dsi_pll.ctrl);
pclk = vlv_dsi_pclk(encoder, config);
@@ -213,9 +213,10 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
@@ -235,20 +236,21 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
DSI_PLL_LOCK, 20)) {
vlv_cck_put(dev_priv);
- drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
+ drm_err(display->drm, "DSI PLL lock failed\n");
return;
}
vlv_cck_put(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+ drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
void vlv_dsi_pll_disable(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
@@ -260,14 +262,14 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
vlv_cck_put(dev_priv);
}
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
bool enabled;
u32 val;
u32 mask;
mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(display, BXT_DSI_PLL_ENABLE);
enabled = (val & mask) == mask;
if (!enabled)
@@ -281,17 +283,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
- val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
- if (IS_GEMINILAKE(dev_priv)) {
+ val = intel_de_read(display, BXT_DSI_PLL_CTL);
+ if (display->platform.geminilake) {
if (!(val & BXT_DSIA_16X_MASK)) {
- drm_dbg(&dev_priv->drm,
- "Invalid PLL divider (%08x)\n", val);
+ drm_dbg_kms(display->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
} else {
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
- drm_dbg(&dev_priv->drm,
- "Invalid PLL divider (%08x)\n", val);
+ drm_dbg_kms(display->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
}
@@ -301,29 +303,30 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
- intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
+ intel_de_rmw(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
/*
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
+ if (intel_de_wait_for_clear(display, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for PLL lock deassertion\n");
}
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pll_ctl, pll_div;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
@@ -352,14 +355,14 @@ static int bxt_dsi_pclk(struct intel_encoder *encoder,
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 pclk;
- config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
+ config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL);
pclk = bxt_dsi_pclk(encoder, config);
- drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
+ drm_dbg_kms(display->drm, "Calculated pclk=%u\n", pclk);
return pclk;
}
@@ -375,10 +378,9 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
-static void glk_dsi_program_esc_clock(struct drm_device *dev,
- const struct intel_crtc_state *config)
+static void glk_dsi_program_esc_clock(struct intel_display *display,
+ const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dsi_rate = 0;
u32 pll_ratio = 0;
u32 ddr_clk = 0;
@@ -415,17 +417,16 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
txesc2_div = min_t(u32, div2_value, 10);
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
+ intel_de_write(display, MIPIO_TXESC_CLK_DIV1,
(1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2,
+ intel_de_write(display, MIPIO_TXESC_CLK_DIV2,
(1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
/* Program BXT Mipi clocks and dividers */
-static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
+static void bxt_dsi_program_clocks(struct intel_display *display, enum port port,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
u32 dsi_rate = 0;
u32 pll_ratio = 0;
@@ -436,7 +437,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
u32 mipi_8by3_divider;
/* Clear old configurations */
- tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
@@ -472,13 +473,13 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
- intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp);
}
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
@@ -494,7 +495,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN;
dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX;
} else {
@@ -503,11 +504,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
}
if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Can't get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
} else
- drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
+ drm_dbg_kms(display->drm, "DSI PLL calculation is Done!!\n");
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
@@ -519,7 +520,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
*/
- if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
+ if (display->platform.broxton && dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
pclk = bxt_dsi_pclk(encoder, config);
@@ -536,46 +537,45 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* Configure PLL values */
- intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
- intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL);
+ intel_de_write(display, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ intel_de_posting_read(display, BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
for_each_dsi_port(port, intel_dsi->ports)
- bxt_dsi_program_clocks(encoder->base.dev, port, config);
+ bxt_dsi_program_clocks(display, port, config);
} else {
- glk_dsi_program_esc_clock(encoder->base.dev, config);
+ glk_dsi_program_esc_clock(display, config);
}
/* Enable DSI PLL */
- intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
+ intel_de_rmw(display, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
- if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
+ if (intel_de_wait_for_set(display, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for DSI PLL to lock\n");
return;
}
- drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+ drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
/* Clear old configurations */
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
index f975660fa609..f26e31a7dd69 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
@@ -9,7 +9,6 @@
#include <linux/types.h>
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
struct intel_display;
struct intel_encoder;
@@ -33,11 +32,11 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
#ifdef I915
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+bool bxt_dsi_pll_is_enabled(struct intel_display *display);
void assert_dsi_pll_enabled(struct intel_display *display);
void assert_dsi_pll_disabled(struct intel_display *display);
#else
-static inline bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+static inline bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
return false;
}
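
Both headers retain the #ifdef I915 split: real prototypes for the i915 build, static inline no-op stubs otherwise, so shared display code compiles unchanged when the functions are unavailable. An illustration of the idiom with a hypothetical function name:

/* Hypothetical example of the stub idiom used in the headers above. */
#ifdef I915
bool example_pll_is_enabled(struct intel_display *display);
#else
static inline bool example_pll_is_enabled(struct intel_display *display)
{
	return false;	/* compiled out: report the PLL as disabled */
}
#endif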
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 388f90784d8a..f566191d843b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -48,8 +48,7 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
i915_gem_object_evictable(obj))
assert_object_held(obj);
#endif
- return mr && (mr->type == INTEL_MEMORY_LOCAL ||
- mr->type == INTEL_MEMORY_STOLEN_LOCAL);
+ return mr && intel_memory_type_is_local(mr->type);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index f6c59f20832f..46a5aa4ab9c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -289,6 +289,14 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
+static dma_addr_t gen8_ggtt_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN8_PAGE_PRESENT;
+ *is_local = pte & GEN12_GGTT_PTE_LM;
+
+ return pte & GEN12_GGTT_PTE_ADDR_MASK;
+}
+
static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
{
struct intel_gt *gt = ggtt->vm.gt;
@@ -435,6 +443,11 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
writeq(pte, addr);
}
+static gen8_pte_t gen8_get_pte(void __iomem *addr)
+{
+ return readq(addr);
+}
+
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
@@ -450,6 +463,16 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local);
+}
+
static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
dma_addr_t addr, u64 offset,
unsigned int pat_index, u32 flags)
@@ -605,6 +628,17 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset,
+ bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return vm->pte_decode(ioread32(pte), is_present, is_local);
+}
+
/*
* Binds an object into the global gtt with the specified cache level.
* The object will be accessible to the GPU via commands whose operands
@@ -769,6 +803,14 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ return ggtt->vm.read_entry(vm, offset, is_present, is_local);
+}
+
/*
* Reserve the top of the GuC address space for firmware images. Addresses
* beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
@@ -1245,6 +1287,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen8_ggtt_read_entry;
/*
* Serialize GTT updates with aperture access on BXT if VT-d is on,
@@ -1291,6 +1334,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+ ggtt->vm.pte_decode = gen8_ggtt_pte_decode;
+
return ggtt_probe_common(ggtt, size);
}
@@ -1390,6 +1435,14 @@ static u64 iris_pte_encode(dma_addr_t addr,
return pte;
}
+static dma_addr_t gen6_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN6_PTE_VALID;
+ *is_local = false;
+
+ return ((pte & 0xff0) << 28) | (pte & ~0xfff);
+}
+
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
@@ -1428,6 +1481,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen6_ggtt_read_entry;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -1443,6 +1497,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_decode = gen6_pte_decode;
+
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
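
Each new pte_decode hook is the exact inverse of its matching pte_encode: it strips the flag bits back out and reassembles the DMA address. For the gen6 layout (address bits 31:12 kept in place, bits 39:32 packed into PTE bits 11:4) the round trip can be checked in plain userspace C; the encode side below is a simplified stand-in for the driver's encode path, kept only to exercise the decode math quoted above:

/* Standalone round-trip check of the gen6 PTE decode math. */
#include <assert.h>
#include <stdint.h>

#define GEN6_PTE_VALID	(1u << 0)

static uint64_t example_pte_encode(uint64_t addr)
{
	/* bits 31:12 stay in place, bits 39:32 move to PTE bits 11:4 */
	return (addr & 0xfffff000ull) | ((addr >> 28) & 0xff0) | GEN6_PTE_VALID;
}

static uint64_t example_pte_decode(uint64_t pte)
{
	/* mirrors gen6_pte_decode() above, minus the is_present flag */
	return ((pte & 0xff0) << 28) | (pte & ~0xfffull);
}

int main(void)
{
	uint64_t addr = 0x12abcde000ull;	/* 4KiB-aligned, 40-bit address */

	assert(example_pte_decode(example_pte_encode(addr)) == addr);
	return 0;
}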
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
index 59eed0a0ce90..c5f5f0bdfb2c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -27,6 +27,13 @@ static void gmch_ggtt_insert_page(struct i915_address_space *vm,
intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}
+static dma_addr_t gmch_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ return intel_gmch_gtt_read_entry(offset >> PAGE_SHIFT,
+ is_present, is_local);
+}
+
static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
@@ -103,6 +110,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
ggtt->vm.clear_range = gmch_ggtt_clear_range;
ggtt->vm.scratch_range = gmch_ggtt_clear_range;
+ ggtt->vm.read_entry = gmch_ggtt_read_entry;
ggtt->vm.cleanup = gmch_ggtt_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 0a36ea751b63..9d3a3ad567a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -312,6 +312,7 @@ struct i915_address_space {
u64 (*pte_encode)(dma_addr_t addr,
unsigned int pat_index,
u32 flags); /* Create a valid PTE */
+ dma_addr_t (*pte_decode)(u64 pte, bool *is_present, bool *is_local);
#define PTE_READ_ONLY BIT(0)
#define PTE_LM BIT(1)
@@ -340,6 +341,8 @@ struct i915_address_space {
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags);
+ dma_addr_t (*read_entry)(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local);
void (*cleanup)(struct i915_address_space *vm);
void (*foreach)(struct i915_address_space *vm,
@@ -590,6 +593,9 @@ void intel_ggtt_bind_vma(struct i915_address_space *vm,
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res);
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local);
+
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
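
The read_entry vfunc gives callers a uniform way to read back what the GGTT currently maps at a given offset, independent of hardware generation. A hypothetical caller, assuming kernel context and only the intel_ggtt_read_entry() wrapper added above:

/* Hypothetical debug helper built on the new read-back path. */
static void example_dump_ggtt_entry(struct i915_address_space *vm, u64 offset)
{
	bool is_present, is_local;
	dma_addr_t addr;

	addr = intel_ggtt_read_entry(vm, offset, &is_present, &is_local);
	drm_dbg(&vm->i915->drm, "GGTT +0x%llx -> %pad%s%s\n",
		offset, &addr,
		is_present ? " [present]" : "",
		is_local ? " [lmem]" : "");
}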
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 9378d5901c49..9ca42589da4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -117,21 +117,10 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_EI_MODE(1);
- /*
- * BSpec 52698 - Render powergating must be off.
- * FIXME BSpec is outdated, disabling powergating for MTL is just
- * temporary wa and should be removed after fixing real cause
- * of forcewake timeouts.
- */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
- pg_enable =
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE;
- else
- pg_enable =
- GEN9_RENDER_PG_ENABLE |
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE;
+ pg_enable =
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
for (i = 0; i < I915_MAX_VCS; i++)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 64e9317f58fb..8731f275fdd9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -550,6 +550,7 @@ static unsigned int init_emon(struct intel_uncore *uncore)
static bool gen5_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_display *display = &i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u8 fstart, vstart;
u32 rgvmodectl;
@@ -608,7 +609,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
rps->ips.last_time2 = ktime_get_raw_ns();
spin_lock(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PCU_EVENT);
+ ilk_enable_display_irq(display, DE_PCU_EVENT);
spin_unlock(&i915->irq_lock);
spin_unlock_irq(&mchdev_lock);
@@ -621,13 +622,14 @@ static bool gen5_rps_enable(struct intel_rps *rps)
static void gen5_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_display *display = &i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
spin_lock(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PCU_EVENT);
+ ilk_disable_display_irq(display, DE_PCU_EVENT);
spin_unlock(&i915->irq_lock);
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d791f9baa11d..456d3372eef8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -317,6 +317,11 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
+void intel_huc_fini_late(struct intel_huc *huc)
+{
+ delayed_huc_load_fini(huc);
+}
+
#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
static int check_huc_loading_mode(struct intel_huc *huc)
{
@@ -414,12 +419,6 @@ out:
void intel_huc_fini(struct intel_huc *huc)
{
- /*
- * the fence is initialized in init_early, so we need to clean it up
- * even if HuC loading is off.
- */
- delayed_huc_load_fini(huc);
-
if (huc->heci_pkt)
i915_vma_unpin_and_release(&huc->heci_pkt, 0);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index d5e441b9e08d..921ad4b1687f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -55,6 +55,7 @@ struct intel_huc {
int intel_huc_sanitize(struct intel_huc *huc);
void intel_huc_init_early(struct intel_huc *huc);
+void intel_huc_fini_late(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 90ba1b0b4c9d..4a3493e8d433 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -136,6 +136,7 @@ void intel_uc_init_late(struct intel_uc *uc)
void intel_uc_driver_late_release(struct intel_uc *uc)
{
+ intel_huc_fini_late(&uc->huc);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index eedd1865bb98..62d14f82256f 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -46,6 +46,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
+ intel_wakeref_t wakeref;
int ret;
if (high_gm) {
@@ -63,12 +64,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&gt->ggtt->vm.mutex);
- mmio_hw_access_pre(gt);
+ wakeref = mmio_hw_access_pre(gt);
ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
- mmio_hw_access_post(gt);
+ mmio_hw_access_post(gt, wakeref);
mutex_unlock(&gt->ggtt->vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
@@ -226,7 +227,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(uncore->rpm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index baccbf1761b7..673534f061ef 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -91,16 +91,17 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
.diff = 0,
};
struct diff_mmio *node, *next;
+ intel_wakeref_t wakeref;
INIT_LIST_HEAD(&param.diff_mmio_list);
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->gt);
+ wakeref = mmio_hw_access_pre(gvt->gt);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2fa7ca19ba5d..ae9b0ded3651 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -220,9 +220,11 @@ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
static void ggtt_invalidate(struct intel_gt *gt)
{
- mmio_hw_access_pre(gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gt);
intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- mmio_hw_access_post(gt);
+ mmio_hw_access_post(gt, wakeref);
}
static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 01d890999f25..1d10c16e6465 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -570,14 +570,15 @@ enum {
GVT_FAILSAFE_GUEST_ERR,
};
-static inline void mmio_hw_access_pre(struct intel_gt *gt)
+static inline intel_wakeref_t mmio_hw_access_pre(struct intel_gt *gt)
{
- intel_runtime_pm_get(gt->uncore->rpm);
+ return intel_runtime_pm_get(gt->uncore->rpm);
}
-static inline void mmio_hw_access_post(struct intel_gt *gt)
+static inline void mmio_hw_access_post(struct intel_gt *gt,
+ intel_wakeref_t wakeref)
{
- intel_runtime_pm_put_unchecked(gt->uncore->rpm);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 4efee6797873..e6e9010462e3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -264,6 +264,7 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned int fence_num = offset_to_fence_num(off);
+ intel_wakeref_t wakeref;
int ret;
ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
@@ -271,10 +272,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
write_vreg(vgpu, off, p_data, bytes);
- mmio_hw_access_pre(gvt->gt);
+ wakeref = mmio_hw_access_pre(gvt->gt);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
return 0;
}
@@ -513,7 +514,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
switch (wrpll_ctl & WRPLL_REF_MASK) {
case WRPLL_REF_PCH_SSC:
- refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
+ refclk = 135000;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -544,7 +545,7 @@ out:
static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
u32 dp_br = 0;
- int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
+ int refclk = 100000;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
struct dpll clock = {};
@@ -1975,10 +1976,12 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
vgpu == gvt->scheduler.engine_owner[engine->id] ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
vgpu_vreg(vgpu, offset) =
intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -3209,10 +3212,12 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
int i, id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
for (i = 0; i < vgpu_fence_sz(vgpu); i++)
intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
}
@@ -3233,8 +3238,10 @@ void intel_gvt_restore_mmio(struct intel_gvt *gvt)
int id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
}
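
Across the GVT hunks the shape of the change is identical: mmio_hw_access_pre() now hands back the intel_wakeref_t cookie from intel_runtime_pm_get(), and the matching post consumes that exact cookie, so the runtime-PM core can track each reference instead of relying on the unchecked put. The resulting pattern, as a sketch in kernel context:

/* Sketch of the tracked-wakeref pattern adopted throughout gvt/. */
static void example_hw_access(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	wakeref = mmio_hw_access_pre(gt);	/* get + remember the cookie */
	/* ... MMIO accesses that require the device to be awake ... */
	mmio_hw_access_post(gt, wakeref);	/* put the same cookie back */
}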
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 509f9ccae3a9..dbad4d853d3a 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
u8 *buf;
struct opregion_header *header;
struct vbt v;
- const char opregion_signature[16] = OPREGION_SIGNATURE;
gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
@@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
/* emulated opregion with VBT mailbox only */
buf = (u8 *)vgpu_opregion(vgpu)->va;
header = (struct opregion_header *)buf;
- memcpy(header->signature, opregion_signature,
- sizeof(opregion_signature));
+
+ static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1);
+ memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature));
+
header->size = 0x8;
header->opregion_ver = 0x02000000;
header->mboxes = MBOX_VBT;
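
The static_assert makes the size relationship explicit: the signature field holds exactly the literal's characters without its trailing NUL, so copying sizeof(header->signature) bytes is provably safe. The guard can be reproduced in standalone C11; the struct below is a trimmed stand-in for the real opregion_header:

/* Standalone illustration of the signature-copy guard. */
#include <assert.h>
#include <string.h>

#define OPREGION_SIGNATURE "IntelGraphicsMem"	/* 16 chars + NUL */

struct example_header {
	char signature[16];	/* deliberately not NUL-terminated */
};

int main(void)
{
	struct example_header header;

	/* copy exactly the signature bytes, never the literal's NUL */
	static_assert(sizeof(header.signature) == sizeof(OPREGION_SIGNATURE) - 1,
		      "field must match the literal minus its NUL");
	memcpy(header.signature, OPREGION_SIGNATURE, sizeof(header.signature));

	assert(memcmp(header.signature, "IntelGraphicsMem", 16) == 0);
	return 0;
}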
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 9f97f743aa71..6c2d68e88266 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -447,6 +447,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
if (!vgpu_data->active)
return;
@@ -465,7 +466,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->current_vgpu = NULL;
}
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
for_each_engine(engine, vgpu->gvt->gt, id) {
if (scheduler->engine_owner[engine->id] == vgpu) {
@@ -474,6 +475,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0d9e263913ff..967c0501e91e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -66,8 +66,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
struct drm_i915_private *i915 = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
-
intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index ce3cc93ea211..f5262b8ad237 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -578,7 +578,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
*/
intel_dram_detect(dev_priv);
- intel_bw_init_hw(dev_priv);
+ intel_bw_init_hw(display);
return 0;
@@ -622,11 +622,12 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
* Perform any steps necessary to make the driver available via kernel
* internal or userspace interfaces.
*/
-static void i915_driver_register(struct drm_i915_private *dev_priv)
+static int i915_driver_register(struct drm_i915_private *dev_priv)
{
struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
unsigned int i;
+ int ret;
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
@@ -634,10 +635,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
- if (drm_dev_register(&dev_priv->drm, 0)) {
- drm_err(&dev_priv->drm,
- "Failed to register driver for userspace access!\n");
- return;
+ ret = drm_dev_register(&dev_priv->drm, 0);
+ if (ret) {
+ i915_probe_error(dev_priv,
+ "Failed to register driver for userspace access!\n");
+ drm_dev_unregister(&dev_priv->drm);
+ i915_pmu_unregister(dev_priv);
+ i915_gem_driver_unregister(dev_priv);
+ return ret;
}
i915_debugfs_register(dev_priv);
@@ -660,6 +665,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
if (i915_switcheroo_register(dev_priv))
drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
+
+ return 0;
}
/**
@@ -834,7 +841,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_gem;
- i915_driver_register(i915);
+ ret = i915_driver_register(i915);
+ if (ret)
+ goto out_cleanup_gem;
enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -845,6 +854,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_cleanup_gem:
+ intel_pxp_fini(i915);
i915_gem_suspend(i915);
i915_gem_driver_remove(i915);
i915_gem_driver_release(i915);
@@ -981,7 +991,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_dp_mst_suspend(display);
intel_irq_suspend(i915);
- intel_hpd_cancel_work(i915);
+ intel_hpd_cancel_work(display);
if (HAS_DISPLAY(i915))
intel_display_driver_suspend_access(display);
@@ -1064,7 +1074,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_driver_suspend(display);
intel_irq_suspend(dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_hpd_cancel_work(display);
if (HAS_DISPLAY(dev_priv))
intel_display_driver_suspend_access(display);
@@ -1201,7 +1211,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_pps_unlock_regs_wa(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/*
* Interrupts have to be enabled before any batches are run. If not the
@@ -1227,7 +1237,7 @@ static int i915_drm_resume(struct drm_device *dev)
if (HAS_DISPLAY(dev_priv))
intel_display_driver_resume_access(display);
- intel_hpd_init(dev_priv);
+ intel_hpd_init(display);
intel_display_driver_resume(display);
@@ -1235,7 +1245,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -1575,7 +1585,7 @@ static int intel_runtime_suspend(struct device *kdev)
assert_forcewakes_inactive(&dev_priv->uncore);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
drm_dbg(&dev_priv->drm, "Device suspended\n");
return 0;
@@ -1633,11 +1643,11 @@ static int intel_runtime_resume(struct device *kdev)
* everyone else do it here.
*/
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
}
- skl_watermark_ipc_update(dev_priv);
+ skl_watermark_ipc_update(display);
enable_rpm_wakeref_asserts(rpm);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ffc346379cc2..236c48d282e4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -305,6 +305,8 @@ struct drm_i915_private {
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
+ INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
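
Besides the new GDDR_ECC entry, the enum gains a __INTEL_DRAM_TYPE_MAX sentinel, which makes bounds-checked table lookups possible. A hypothetical example of the kind of lookup the sentinel enables (the table name and helper are illustrative, not from this series):

/* Hypothetical name table keyed by the extended DRAM type enum. */
static const char * const example_dram_names[__INTEL_DRAM_TYPE_MAX] = {
	[INTEL_DRAM_GDDR]	= "GDDR",
	[INTEL_DRAM_GDDR_ECC]	= "GDDR (ECC)",
	/* ... remaining entries elided ... */
};

static const char *example_dram_name(int type)
{
	if (type < 0 || type >= __INTEL_DRAM_TYPE_MAX ||
	    !example_dram_names[type])
		return "unknown";

	return example_dram_names[type];
}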
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 37ca4a35daf2..c1f938a1da44 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -277,14 +277,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT))
@@ -306,12 +306,12 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_handler(display, eir, dpinvgtt);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+ valleyview_pipestat_irq_handler(display, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -367,14 +367,14 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT |
@@ -392,12 +392,12 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_handler(display, eir, dpinvgtt);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+ valleyview_pipestat_irq_handler(display, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -418,6 +418,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct intel_display *display = &i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -458,9 +459,9 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
if (de_iir) {
raw_reg_write(regs, DEIIR, de_iir);
if (DISPLAY_VER(i915) >= 7)
- ivb_display_irq_handler(i915, de_iir);
+ ivb_display_irq_handler(display, de_iir);
else
- ilk_display_irq_handler(i915, de_iir);
+ ilk_display_irq_handler(display, de_iir);
ret = IRQ_HANDLED;
}
@@ -506,6 +507,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
u32 master_ctl;
@@ -524,7 +526,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- gen8_de_irq_handler(dev_priv, master_ctl);
+ gen8_de_irq_handler(display, master_ctl);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
}
@@ -556,6 +558,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct intel_display *display = &i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
@@ -575,13 +578,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & GEN11_DISPLAY_IRQ)
- gen11_display_irq_handler(i915);
+ gen11_display_irq_handler(display);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);
gen11_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(display, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -613,6 +616,7 @@ static inline void dg1_master_intr_enable(void __iomem * const regs)
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
+ struct intel_display *display = &i915->display;
struct intel_gt *gt = to_gt(i915);
void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 master_tile_ctl, master_ctl;
@@ -641,13 +645,13 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
gen11_gt_irq_handler(gt, master_ctl);
if (master_ctl & GEN11_DISPLAY_IRQ)
- gen11_display_irq_handler(i915);
+ gen11_display_irq_handler(display);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);
dg1_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(display, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -691,24 +695,27 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
gen5_gt_irq_reset(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_reset(dev_priv);
+ vlv_display_irq_reset(display);
spin_unlock_irq(&dev_priv->irq_lock);
}
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen8_master_intr_disable(intel_uncore_regs(uncore));
gen8_gt_irq_reset(to_gt(dev_priv));
- gen8_display_irq_reset(dev_priv);
+ gen8_display_irq_reset(display);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
if (HAS_PCH_SPLIT(dev_priv))
@@ -718,13 +725,14 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
gen11_gt_irq_reset(gt);
- gen11_display_irq_reset(dev_priv);
+ gen11_display_irq_reset(display);
gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
@@ -732,6 +740,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_gt *gt;
unsigned int i;
@@ -741,7 +750,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
for_each_gt(gt, dev_priv, i)
gen11_gt_irq_reset(gt);
- gen11_display_irq_reset(dev_priv);
+ gen11_display_irq_reset(display);
gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
@@ -751,6 +760,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
@@ -761,23 +771,27 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_reset(dev_priv);
+ vlv_display_irq_reset(display);
spin_unlock_irq(&dev_priv->irq_lock);
}
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen5_gt_irq_postinstall(to_gt(dev_priv));
- ilk_de_irq_postinstall(dev_priv);
+ ilk_de_irq_postinstall(display);
}
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen5_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_postinstall(dev_priv);
+ vlv_display_irq_postinstall(display);
spin_unlock_irq(&dev_priv->irq_lock);
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
@@ -786,20 +800,23 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen8_gt_irq_postinstall(to_gt(dev_priv));
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
gen11_gt_irq_postinstall(gt);
- gen11_de_irq_postinstall(dev_priv);
+ gen11_de_irq_postinstall(display);
gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
@@ -809,6 +826,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
struct intel_gt *gt;
@@ -819,7 +837,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
- dg1_de_irq_postinstall(dev_priv);
+ dg1_de_irq_postinstall(display);
dg1_master_intr_enable(intel_uncore_regs(uncore));
intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
@@ -827,10 +845,12 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen8_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_postinstall(dev_priv);
+ vlv_display_irq_postinstall(display);
spin_unlock_irq(&dev_priv->irq_lock);
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -900,9 +920,10 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
- i9xx_display_irq_reset(dev_priv);
+ i9xx_display_irq_reset(display);
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
@@ -911,6 +932,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -932,7 +954,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_ASLE_INTERRUPT;
}
- if (I915_HAS_HOTPLUG(dev_priv)) {
+ if (HAS_HOTPLUG(dev_priv)) {
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
}
@@ -942,16 +964,17 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irq(&dev_priv->irq_lock);
- i915_enable_asle_pipestat(dev_priv);
+ i915_enable_asle_pipestat(display);
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -972,13 +995,13 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
- if (I915_HAS_HOTPLUG(dev_priv) &&
+ if (HAS_HOTPLUG(dev_priv) &&
iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
@@ -992,9 +1015,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
- i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ i915_pipestat_irq_handler(display, iir, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -1006,9 +1029,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
- i9xx_display_irq_reset(dev_priv);
+ i9xx_display_irq_reset(display);
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
@@ -1036,6 +1060,7 @@ static u32 i965_error_mask(struct drm_i915_private *i915)
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -1064,17 +1089,18 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irq(&dev_priv->irq_lock);
- i915_enable_asle_pipestat(dev_priv);
+ i915_enable_asle_pipestat(display);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1096,11 +1122,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
@@ -1119,9 +1145,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
- i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ i965_pipestat_irq_handler(display, iir, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, IRQ_HANDLED);
@@ -1280,6 +1306,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
int irq = to_pci_dev(dev_priv->drm.dev)->irq;
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
@@ -1289,7 +1316,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
free_irq(irq, dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_hpd_cancel_work(display);
dev_priv->irqs_enabled = false;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c5064eebe063..49beab8e324d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1077,6 +1077,7 @@
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
#define BXT_GMBUS_GATING_DIS (1 << 14)
+#define DG2_DPFC_GATING_DIS REG_BIT(31)
#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
#define DPCE_GATING_DIS REG_BIT(17)
@@ -4242,6 +4243,11 @@ enum skl_power_gate {
#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
+#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114
+#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114
+#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
+#define MTL_DPFC_GATING_DIS REG_BIT(6)
+
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index d40ee1b42110..59bd603e6deb 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -171,6 +171,17 @@ intel_memory_region_by_type(struct drm_i915_private *i915,
return NULL;
}
+bool intel_memory_type_is_local(enum intel_memory_type mem_type)
+{
+ switch (mem_type) {
+ case INTEL_MEMORY_LOCAL:
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* intel_memory_region_reserve - Reserve a memory range
* @mem: The region for which we want to reserve a range.
@@ -216,7 +227,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
return err;
}
-static const char *region_type_str(u16 type)
+const char *intel_memory_type_str(enum intel_memory_type type)
{
switch (type) {
case INTEL_MEMORY_SYSTEM:
@@ -260,7 +271,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->instance = instance;
snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
- region_type_str(type), instance);
+ intel_memory_type_str(type), instance);
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 5973b6fe13cf..b3b75be9ced5 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -85,6 +85,8 @@ struct intel_memory_region {
void *region_private;
};
+bool intel_memory_type_is_local(enum intel_memory_type mem_type);
+
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance);
@@ -107,6 +109,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915);
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
enum intel_memory_type mem_type);
+const char *intel_memory_type_str(enum intel_memory_type type);
__printf(2, 3) void
intel_memory_region_set_name(struct intel_memory_region *mem,
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 48836ef52d40..a2894a56e18f 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -7,8 +7,6 @@
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
-#include <drm/drm_print.h>
-
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
@@ -16,11 +14,13 @@
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/ref_tracker.h>
-#include <linux/slab.h>
-#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+struct drm_printer;
+struct intel_runtime_pm;
+struct intel_wakeref;
+
typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_REFTRACK_DEAD_COUNT 16
@@ -32,9 +32,6 @@ typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-struct intel_runtime_pm;
-struct intel_wakeref;
-
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
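The hunk above trims intel_wakeref.h down to forward declarations: a header that only stores pointers to drm_printer, intel_runtime_pm and intel_wakeref never needs their full definitions. A minimal standalone C sketch of the same idea (illustrative names, not kernel code):

#include <stdio.h>

struct runtime_pm;	/* forward declaration: size unknown, pointers OK */

struct wakeref {
	struct runtime_pm *rpm;	/* pointer to an incomplete type is fine */
	int count;
};

/* The full definition is only needed where the type is dereferenced,
 * i.e. in the .c file, not in every user of the header. */
struct runtime_pm { int usage; };

static void wakeref_get(struct wakeref *wf)
{
	wf->rpm->usage++;
	wf->count++;
}

int main(void)
{
	struct runtime_pm rpm = { 0 };
	struct wakeref wf = { &rpm, 0 };

	wakeref_get(&wf);
	printf("usage=%d count=%d\n", rpm.usage, wf.count);
	return 0;
}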
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index fee76c1d2f45..889281819c5b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -23,7 +23,9 @@
#include <linux/random.h>
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_gsc_fw.h"
#include "i915_driver.h"
@@ -253,11 +255,27 @@ int i915_mock_selftests(void)
int i915_live_selftests(struct pci_dev *pdev)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ struct intel_uncore *uncore = &i915->uncore;
int err;
+ u32 pg_enable;
+ intel_wakeref_t wakeref;
if (!i915_selftest.live)
return 0;
+ /*
+ * FIXME: Disable render powergating; this is a temporary workaround and
+ * should be removed once the real cause of the forcewake timeouts is fixed.
+ */
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 00), IP_VER(12, 74))) {
+ pg_enable = intel_uncore_read(uncore, GEN9_PG_ENABLE);
+ if (pg_enable & GEN9_RENDER_PG_ENABLE)
+ intel_uncore_write_fw(uncore, GEN9_PG_ENABLE,
+ pg_enable & ~GEN9_RENDER_PG_ENABLE);
+ }
+ }
+
__wait_gsc_proxy_completed(i915);
__wait_gsc_huc_load_completed(i915);
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 9e310f4099f4..eee5c4f45a43 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -33,8 +33,14 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
DRAM_TYPE_STR(DDR4),
DRAM_TYPE_STR(LPDDR3),
DRAM_TYPE_STR(LPDDR4),
+ DRAM_TYPE_STR(DDR5),
+ DRAM_TYPE_STR(LPDDR5),
+ DRAM_TYPE_STR(GDDR),
+ DRAM_TYPE_STR(GDDR_ECC),
};
+ BUILD_BUG_ON(ARRAY_SIZE(str) != __INTEL_DRAM_TYPE_MAX);
+
if (type >= ARRAY_SIZE(str))
type = INTEL_DRAM_UNKNOWN;
@@ -444,8 +450,6 @@ skl_get_dram_info(struct drm_i915_private *i915)
int ret;
dram_info->type = skl_get_dram_type(i915);
- drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
- intel_dram_type_str(dram_info->type));
ret = skl_dram_get_channels_info(i915);
if (ret)
@@ -560,10 +564,9 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dram_info->type != type);
drm_dbg_kms(&i915->drm,
- "CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n",
+ "CH%u DIMM size: %u Gb, width: X%u, ranks: %u\n",
i - BXT_D_CR_DRP0_DUNIT_START,
- dimm.size, dimm.width, dimm.ranks,
- intel_dram_type_str(type));
+ dimm.size, dimm.width, dimm.ranks);
if (valid_ranks == 0)
valid_ranks = dimm.ranks;
@@ -687,6 +690,10 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915)
drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
dram_info->type = INTEL_DRAM_GDDR;
break;
+ case 9:
+ drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
+ dram_info->type = INTEL_DRAM_GDDR_ECC;
+ break;
default:
MISSING_CASE(val);
return -EINVAL;
@@ -726,6 +733,10 @@ void intel_dram_detect(struct drm_i915_private *i915)
ret = bxt_get_dram_info(i915);
else
ret = skl_get_dram_info(i915);
+
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
if (ret)
return;
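The BUILD_BUG_ON added above pins the str[] name table to __INTEL_DRAM_TYPE_MAX, so adding an enum value without a matching string breaks the build instead of printing garbage. A standalone C sketch of the pattern (illustrative names, not the i915 code):

#include <assert.h>
#include <stdio.h>

enum dram_type { DRAM_UNKNOWN, DRAM_DDR4, DRAM_GDDR, DRAM_GDDR_ECC, DRAM_TYPE_MAX };

static const char * const dram_type_str[] = {
	[DRAM_UNKNOWN]  = "unknown",
	[DRAM_DDR4]     = "DDR4",
	[DRAM_GDDR]     = "GDDR",
	[DRAM_GDDR_ECC] = "GDDR ECC",
};

/* Compile-time failure if the table and the enum ever drift apart. */
static_assert(sizeof(dram_type_str) / sizeof(dram_type_str[0]) == DRAM_TYPE_MAX,
	      "dram_type_str[] out of sync with enum dram_type");

int main(void)
{
	enum dram_type t = DRAM_GDDR_ECC;

	if (t >= DRAM_TYPE_MAX)	/* mirrors the range check in the patch */
		t = DRAM_UNKNOWN;
	printf("DRAM type: %s\n", dram_type_str[t]);
	return 0;
}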
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
index d43efa8810b8..012596402a33 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.c
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -728,7 +728,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
fw_mem->core_data, fw_mem->core_code_alloc_size);
if (err)
- goto err_free_fw_core_data_obj;
+ goto err_free_kdata;
memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
@@ -738,10 +738,14 @@ pvr_fw_process(struct pvr_device *pvr_dev)
memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);
/* We're finished with the firmware section memory on the CPU, unmap. */
- if (fw_core_data_ptr)
+ if (fw_core_data_ptr) {
pvr_fw_object_vunmap(fw_mem->core_data_obj);
- if (fw_core_code_ptr)
+ fw_core_data_ptr = NULL;
+ }
+ if (fw_core_code_ptr) {
pvr_fw_object_vunmap(fw_mem->core_code_obj);
+ fw_core_code_ptr = NULL;
+ }
pvr_fw_object_vunmap(fw_mem->data_obj);
fw_data_ptr = NULL;
pvr_fw_object_vunmap(fw_mem->code_obj);
@@ -749,7 +753,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
if (err)
- goto err_free_fw_core_data_obj;
+ goto err_free_kdata;
return 0;
@@ -759,13 +763,16 @@ err_free_kdata:
kfree(fw_mem->data);
kfree(fw_mem->code);
-err_free_fw_core_data_obj:
if (fw_core_data_ptr)
- pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj);
+ pvr_fw_object_vunmap(fw_mem->core_data_obj);
+ if (fw_mem->core_data_obj)
+ pvr_fw_object_destroy(fw_mem->core_data_obj);
err_free_fw_core_code_obj:
if (fw_core_code_ptr)
- pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj);
+ pvr_fw_object_vunmap(fw_mem->core_code_obj);
+ if (fw_mem->core_code_obj)
+ pvr_fw_object_destroy(fw_mem->core_code_obj);
err_free_fw_data_obj:
if (fw_data_ptr)
@@ -832,6 +839,12 @@ pvr_fw_cleanup(struct pvr_device *pvr_dev)
struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
pvr_fw_fini_fwif_connection_ctl(pvr_dev);
+
+ kfree(fw_mem->core_data);
+ kfree(fw_mem->core_code);
+ kfree(fw_mem->data);
+ kfree(fw_mem->code);
+
if (fw_mem->core_code_obj)
pvr_fw_object_destroy(fw_mem->core_code_obj);
if (fw_mem->core_data_obj)
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 1cdb3cfd058d..59b334d094fa 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -671,6 +671,13 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
geom_job->paired_job = frag_job;
frag_job->paired_job = geom_job;
+ /* The geometry job pvr_job structure is used when the fragment
+ * job is being prepared by the GPU scheduler. Have the fragment
+ * job hold a reference on the geometry job to prevent it from being
+ * freed until the fragment job has finished with it.
+ */
+ pvr_job_get(geom_job);
+
/* Skip the fragment job we just paired to the geometry job. */
i++;
}
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index eba69309bb6c..5e9bc0992824 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -866,6 +866,10 @@ static void pvr_queue_free_job(struct drm_sched_job *sched_job)
struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
drm_sched_job_cleanup(sched_job);
+
+ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job)
+ pvr_job_put(job->paired_job);
+
job->paired_job = NULL;
pvr_job_put(job);
}
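The two Imagination hunks above form one fix: pvr_jobs_link_geom_frag() takes a reference on the geometry job when pairing, and pvr_queue_free_job() drops it when the fragment job is freed. A standalone C sketch of that get/put lifetime pattern (illustrative names, not the PowerVR code):

#include <stdio.h>
#include <stdlib.h>

struct job {
	int refcount;
	struct job *paired_job;
	const char *name;
};

static struct job *job_get(struct job *job)
{
	job->refcount++;
	return job;
}

static void job_put(struct job *job)
{
	if (--job->refcount == 0) {
		printf("freeing %s\n", job->name);
		free(job);
	}
}

static struct job *job_alloc(const char *name)
{
	struct job *job = calloc(1, sizeof(*job));

	job->refcount = 1;	/* creation reference */
	job->name = name;
	return job;
}

int main(void)
{
	struct job *geom = job_alloc("geom");
	struct job *frag = job_alloc("frag");

	/* Pairing: the fragment job pins the geometry job... */
	frag->paired_job = job_get(geom);

	job_put(geom);	/* creation ref dropped early; geom still lives */

	/* ...and releases it when the fragment job itself is freed. */
	job_put(frag->paired_job);
	frag->paired_job = NULL;
	job_put(frag);
	return 0;
}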
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db961eade225..2016c1e7242f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -144,6 +144,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+ if (bo->base.import_attach)
+ drm_prime_gem_destroy(&bo->base, bo->sg);
+
/*
* If nouveau_bo_new() allocated this buffer, the GEM object was never
* initialized, so don't attempt to release it.
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9ae2cee1c7c5..67e3c99de73a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -87,9 +87,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
}
- if (gem->import_attach)
- drm_prime_gem_destroy(gem, nvbo->bo.sg);
-
ttm_bo_put(&nvbo->bo);
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index 3d1dddb34603..7d531b6f4c09 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -94,6 +94,7 @@ struct rockchip_hdmi_qp {
struct gpio_desc *enable_gpio;
struct delayed_work hpd_work;
int port_id;
+ const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops;
};
struct rockchip_hdmi_qp_ctrl_ops {
@@ -461,6 +462,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return -ENODEV;
}
+ hdmi->ctrl_ops = cfg->ctrl_ops;
hdmi->dev = &pdev->dev;
hdmi->port_id = -ENODEV;
@@ -600,27 +602,8 @@ static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev)
static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
- u32 val;
- val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
- HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
- HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
- HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
- regmap_write(hdmi->vo_regmap,
- hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
- val);
-
- val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
- RK3588_SET_HPD_PATH_MASK);
- regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
-
- if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
- RK3588_HDMI1_GRANT_SEL);
- else
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
- RK3588_HDMI0_GRANT_SEL);
- regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
+ hdmi->ctrl_ops->io_init(hdmi);
dw_hdmi_qp_resume(dev, hdmi->hdmi);
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index c5bcf8e2f30e..32c4ed685739 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -1754,9 +1754,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_DP1:
- die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_DP1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_DP1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP1_MUX, vp->id);
dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL;
dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags);
break;
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f203ac5514ae..f778a4eee7c9 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -7,8 +7,6 @@ sti-drm-y := \
sti_compositor.o \
sti_crtc.o \
sti_plane.o \
- sti_crtc.o \
- sti_plane.o \
sti_hdmi.o \
sti_hdmi_tx3g4c28phy.o \
sti_dvo.o \
diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
index cd43d2a52a2d..3f44fe5e92e4 100644
--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
+++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
@@ -96,6 +96,9 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
expected_mode = drm_mode_find_dmt(priv->drm, 1920, 1080, 60, false);
KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+ ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_ASSERT_TRUE(test,
drm_mode_parse_command_line_for_connector(cmdline,
connector,
@@ -130,7 +133,8 @@ static void drm_test_pick_cmdline_named(struct kunit *test)
struct drm_device *drm = priv->drm;
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- const struct drm_display_mode *expected_mode, *mode;
+ const struct drm_display_mode *mode;
+ struct drm_display_mode *expected_mode;
const char *cmdline = params->cmdline;
int ret;
@@ -150,6 +154,9 @@ static void drm_test_pick_cmdline_named(struct kunit *test)
expected_mode = params->func(drm);
KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+ ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode));
}
diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
index 59c8408c453c..1cfcb597b088 100644
--- a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
+++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
@@ -7,6 +7,7 @@
#include <kunit/test.h>
#include <drm/drm_connector.h>
+#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
static const struct drm_connector no_connector = {};
@@ -955,8 +956,15 @@ struct drm_cmdline_tv_option_test {
static void drm_test_cmdline_tv_options(struct kunit *test)
{
const struct drm_cmdline_tv_option_test *params = test->param_value;
- const struct drm_display_mode *expected_mode = params->mode_fn(NULL);
+ struct drm_display_mode *expected_mode;
struct drm_cmdline_mode mode = { };
+ int ret;
+
+ expected_mode = params->mode_fn(NULL);
+ KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+
+ ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
&no_connector, &mode));
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 14ad8f0a0af1..5f7257840d8e 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -340,6 +340,28 @@ static void kunit_action_drm_mode_destroy(void *ptr)
}
/**
+ * drm_kunit_add_mode_destroy_action() - Add a drm_mode_destroy() kunit action
+ * @test: The test context object
+ * @mode: The drm_display_mode to destroy eventually
+ *
+ * Registers a kunit action that will destroy the drm_display_mode at
+ * the end of the test.
+ *
+ * If the action cannot be registered, the drm_display_mode is
+ * destroyed immediately.
+ *
+ * Returns:
+ * 0 on success, an error code otherwise.
+ */
+int drm_kunit_add_mode_destroy_action(struct kunit *test,
+ struct drm_display_mode *mode)
+{
+ return kunit_add_action_or_reset(test,
+ kunit_action_drm_mode_destroy,
+ mode);
+}
+EXPORT_SYMBOL_GPL(drm_kunit_add_mode_destroy_action);
+
+/**
* drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object
* @dev: DRM device
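drm_kunit_add_mode_destroy_action() defers destruction of a mode to test teardown via kunit_add_action_or_reset(), which runs the action immediately if it cannot be registered, so nothing leaks on the failure path either. A standalone C sketch of that deferred-action idea (illustrative names, not the KUnit API):

#include <stdio.h>
#include <stdlib.h>

typedef void (*action_fn)(void *);

static struct { action_fn fn; void *data; } actions[16];
static int nr_actions;

/* Stand-in for kunit_add_action_or_reset(): if the action cannot be
 * deferred, run it now so the object is not leaked. */
static int add_action_or_reset(action_fn fn, void *data)
{
	if (nr_actions == 16) {
		fn(data);
		return -1;
	}
	actions[nr_actions].fn = fn;
	actions[nr_actions].data = data;
	nr_actions++;
	return 0;
}

/* Run at test exit, newest first, like KUnit deferred actions. */
static void run_actions(void)
{
	while (nr_actions--)
		actions[nr_actions].fn(actions[nr_actions].data);
}

static void mode_destroy(void *mode)
{
	printf("destroying mode %p\n", mode);
	free(mode);
}

int main(void)
{
	void *mode = malloc(64);

	if (add_action_or_reset(mode_destroy, mode))
		return 1;	/* already destroyed by the helper */

	/* ... test body may assert and bail out anywhere ... */
	run_actions();
	return 0;
}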
diff --git a/drivers/gpu/drm/tests/drm_modes_test.c b/drivers/gpu/drm/tests/drm_modes_test.c
index 6ed51f99e133..f5b20f92df8b 100644
--- a/drivers/gpu/drm/tests/drm_modes_test.c
+++ b/drivers/gpu/drm/tests/drm_modes_test.c
@@ -40,6 +40,7 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
+ int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_NTSC,
@@ -47,6 +48,9 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
@@ -70,6 +74,7 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *expected, *mode;
+ int ret;
expected = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_NTSC,
@@ -77,9 +82,15 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, expected);
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
mode = drm_mode_analog_ntsc_480i(priv->drm);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
}
@@ -87,6 +98,7 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
+ int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_PAL,
@@ -94,6 +106,9 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
@@ -117,6 +132,7 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *expected, *mode;
+ int ret;
expected = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_PAL,
@@ -124,9 +140,15 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, expected);
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
mode = drm_mode_analog_pal_576i(priv->drm);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
}
@@ -134,6 +156,7 @@ static void drm_test_modes_analog_tv_mono_576i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
+ int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_MONOCHROME,
@@ -141,6 +164,9 @@ static void drm_test_modes_analog_tv_mono_576i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
diff --git a/drivers/gpu/drm/tests/drm_probe_helper_test.c b/drivers/gpu/drm/tests/drm_probe_helper_test.c
index bc09ff38aca1..db0e4f5df275 100644
--- a/drivers/gpu/drm/tests/drm_probe_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_probe_helper_test.c
@@ -98,7 +98,7 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
struct drm_display_mode *mode;
- const struct drm_display_mode *expected;
+ struct drm_display_mode *expected;
size_t len;
int ret;
@@ -134,6 +134,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
KUNIT_EXPECT_TRUE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
+
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
}
if (params->num_expected_modes >= 2) {
@@ -145,6 +148,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
KUNIT_EXPECT_FALSE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
+
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
}
mutex_unlock(&priv->drm->mode_config.mutex);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index dde8fc1a3689..90c99d83c4cf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -115,13 +115,14 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
if (!vgdev->has_context_init)
virtio_gpu_create_context(obj->dev, file);
- objs = virtio_gpu_array_alloc(1);
- if (!objs)
- return -ENOMEM;
- virtio_gpu_array_add_obj(objs, obj);
+ if (vfpriv->context_created) {
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, obj);
- if (vfpriv->ctx_id)
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+ }
out_notify:
virtio_gpu_notify(vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a6f5a78f436a..87e584add042 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -366,12 +366,6 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return 0;
obj = new_state->fb->obj[0];
- if (obj->import_attach) {
- ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
- if (ret)
- return ret;
- }
-
if (bo->dumb || obj->import_attach) {
vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
vgdev->fence_drv.context,
@@ -380,7 +374,21 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return -ENOMEM;
}
+ if (obj->import_attach) {
+ ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+ if (ret)
+ goto err_fence;
+ }
+
return 0;
+
+err_fence:
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
+ }
+
+ return ret;
}
static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index c6f3be3cb914..9b4bde3fda18 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -320,6 +320,7 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
obj = &bo->base.base;
+ obj->resv = buf->resv;
obj->funcs = &virtgpu_gem_dma_buf_funcs;
drm_gem_private_object_init(dev, obj, buf->size);
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 9699b08585f7..cd464fe26eb8 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -185,6 +185,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/intel_fbdev_fb.o \
display/xe_display.o \
display/xe_display_misc.o \
+ display/xe_display_rpm.o \
display/xe_display_rps.o \
display/xe_display_wa.o \
display/xe_dsb_buffer.o \
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index dfec5108d2c3..f89bd5e3520d 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -13,7 +13,6 @@
#include <drm/drm_drv.h>
#include "i915_utils.h"
-#include "intel_runtime_pm.h"
#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
deleted file mode 100644
index 274042bff1be..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RUNTIME_PM_H__
-#define __INTEL_RUNTIME_PM_H__
-
-#include "intel_wakeref.h"
-#include "xe_device_types.h"
-#include "xe_pm.h"
-
-#define intel_runtime_pm xe_runtime_pm
-
-static inline void disable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline void enable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline bool
-intel_runtime_pm_suspended(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return pm_runtime_suspended(xe->drm.dev);
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_get_if_in_use(xe) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_get_noresume(xe);
-
- return INTEL_WAKEREF_DEF;
-}
-
-static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_put(xe);
-}
-
-static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref)
-{
- if (wakeref)
- intel_runtime_pm_put_unchecked(pm);
-}
-
-#define intel_runtime_pm_get_raw intel_runtime_pm_get
-#define intel_runtime_pm_put_raw intel_runtime_pm_put
-#define assert_rpm_wakelock_held(x) do { } while (0)
-#define assert_rpm_raw_wakeref_held(x) do { } while (0)
-
-#define with_intel_runtime_pm(rpm, wf) \
- for ((wf) = intel_runtime_pm_get(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
-
-#endif
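The deleted with_intel_runtime_pm() above is the classic for-loop scope-guard macro: the init expression acquires, the increment expression releases, and the body runs exactly once while the reference is held. A standalone C sketch (illustrative names, not kernel code):

#include <stdio.h>

static int get_ref(void)
{
	printf("get\n");
	return 1;
}

static void put_ref(int wf)
{
	(void)wf;
	printf("put\n");
}

/* Note: a bare break inside the body would skip the increment clause
 * and leak the reference, which is why such macros are used with
 * straight-line bodies. */
#define with_ref(wf) \
	for ((wf) = get_ref(); (wf); put_ref(wf), (wf) = 0)

int main(void)
{
	int wakeref;

	with_ref(wakeref) {
		printf("body runs with the reference held\n");
	}
	return 0;
}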
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 0b0aca7a25af..20c3bcd953b7 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -147,7 +147,7 @@ int xe_display_init_early(struct xe_device *xe)
*/
intel_dram_detect(xe);
- intel_bw_init_hw(xe);
+ intel_bw_init_hw(display);
intel_display_device_info_runtime_init(display);
@@ -173,7 +173,7 @@ static void xe_display_fini(void *arg)
struct xe_device *xe = arg;
struct intel_display *display = &xe->display;
- intel_hpd_poll_fini(xe);
+ intel_hpd_poll_fini(display);
intel_hdcp_component_fini(display);
intel_audio_deinit(display);
intel_display_driver_remove(display);
@@ -220,11 +220,13 @@ void xe_display_unregister(struct xe_device *xe)
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
if (master_ctl & DISPLAY_IRQ)
- gen11_display_irq_handler(xe);
+ gen11_display_irq_handler(display);
}
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
@@ -240,19 +242,23 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
void xe_display_irq_reset(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- gen11_display_irq_reset(xe);
+ gen11_display_irq_reset(display);
}
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
if (gt->info.id == XE_GT0)
- gen11_de_irq_postinstall(xe);
+ gen11_de_irq_postinstall(display);
}
static bool suspend_to_idle(void)
@@ -305,7 +311,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe)
intel_dmc_suspend(display);
if (has_display(xe))
- intel_hpd_poll_enable(xe);
+ intel_hpd_poll_enable(display);
}
static void xe_display_disable_d3cold(struct xe_device *xe)
@@ -322,10 +328,10 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
intel_display_driver_init_hw(display);
- intel_hpd_init(xe);
+ intel_hpd_init(display);
if (has_display(xe))
- intel_hpd_poll_disable(xe);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -355,7 +361,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
if (has_display(xe)) {
intel_display_driver_suspend_access(display);
@@ -385,7 +391,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
intel_dp_mst_suspend(display);
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
if (has_display(xe))
intel_display_driver_suspend_access(display);
@@ -400,6 +406,8 @@ void xe_display_pm_shutdown(struct xe_device *xe)
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
@@ -408,7 +416,7 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
return;
}
- intel_hpd_poll_enable(xe);
+ intel_hpd_poll_enable(display);
}
void xe_display_pm_suspend_late(struct xe_device *xe)
@@ -482,7 +490,7 @@ void xe_display_pm_resume(struct xe_device *xe)
if (has_display(xe))
intel_display_driver_resume_access(display);
- intel_hpd_init(xe);
+ intel_hpd_init(display);
if (has_display(xe)) {
intel_display_driver_resume(display);
@@ -491,7 +499,7 @@ void xe_display_pm_resume(struct xe_device *xe)
}
if (has_display(xe))
- intel_hpd_poll_disable(xe);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -502,6 +510,8 @@ void xe_display_pm_resume(struct xe_device *xe)
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
@@ -510,9 +520,9 @@ void xe_display_pm_runtime_resume(struct xe_device *xe)
return;
}
- intel_hpd_init(xe);
- intel_hpd_poll_disable(xe);
- skl_watermark_ipc_update(xe);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
+ skl_watermark_ipc_update(display);
}
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
new file mode 100644
index 000000000000..1955153aadba
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "intel_display_rpm.h"
+#include "xe_device_types.h"
+#include "xe_pm.h"
+
+static struct xe_device *display_to_xe(struct intel_display *display)
+{
+ return container_of(display, struct xe_device, display);
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_display_rpm_get(display);
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_display_rpm_put(display, wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ xe_pm_runtime_get_noresume(display_to_xe(display));
+
+ return INTEL_WAKEREF_DEF;
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ if (wakeref)
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ struct xe_device *xe = display_to_xe(display);
+
+ return pm_runtime_suspended(xe->drm.dev);
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ /* FIXME */
+}
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 68e3d1959ad6..2933ca97d673 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -10,7 +10,9 @@
#include <generated/xe_wa_oob.h>
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915)
+bool intel_display_needs_wa_16023588340(struct intel_display *display)
{
- return XE_WA(xe_root_mmio_gt(i915), 16023588340);
+ struct xe_device *xe = to_xe_device(display->drm);
+
+ return XE_WA(xe_root_mmio_gt(xe), 16023588340);
}
diff --git a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
index a255946b6f77..8cfcd3360896 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
@@ -41,6 +41,7 @@
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE BIT(10) /* gen12 */
#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH BIT(9) /* gen12 */
#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 72ef0b6fc425..f7f2afa596c5 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -585,6 +585,8 @@ struct xe_device {
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
+ INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 03072e094991..084cbdeba8ea 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -322,6 +322,13 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
return 0;
}
+/*
+ * Ensure that roundup_pow_of_two(length) doesn't overflow.
+ * Note that roundup_pow_of_two() operates on unsigned long,
+ * not on u64.
+ */
+#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
+
/**
* xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
* address range
@@ -346,6 +353,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN 7
u32 action[MAX_TLB_INVALIDATION_LEN];
+ u64 length = end - start;
int len = 0;
xe_gt_assert(gt, fence);
@@ -358,11 +366,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
- if (!xe->info.has_range_tlb_invalidation) {
+ if (!xe->info.has_range_tlb_invalidation ||
+ length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
} else {
u64 orig_start = start;
- u64 length = end - start;
u64 align;
if (length < SZ_4K)
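The new MAX_RANGE_TLB_INVALIDATION_LENGTH cap exists because roundup_pow_of_two() works on unsigned long: for any length above 1UL << 63 (on 64-bit) the rounded value cannot be represented, so the code falls back to a full invalidation instead. A standalone C sketch of the hazard (illustrative, not the Xe code):

#include <stdio.h>

/* rounddown_pow_of_two(ULONG_MAX) == 1UL << 63 on 64-bit. */
#define MAX_RANGE_LEN (~0UL - (~0UL >> 1))

static unsigned long roundup_pow_of_two(unsigned long v)
{
	unsigned long p = 1;

	/* p would overflow to 0 (and this loop would never end) once
	 * v exceeds 1UL << 63, hence the guard in main(). */
	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long ok = (1UL << 40) + 1;
	unsigned long too_big = (1UL << 63) + 1;

	printf("%#lx rounds up to %#lx\n", ok, roundup_pow_of_two(ok));

	if (too_big > MAX_RANGE_LEN)
		printf("%#lx: fall back to a full TLB invalidation\n", too_big);
	return 0;
}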
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 85215313976c..43b1192ba61c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -1070,6 +1070,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
+ ret = -EIO;
goto out;
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 8c05fd30b7df..93241fd0a4ba 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -389,12 +389,6 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
blit_cctl_val,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- /* Use Fixed slice CCS mode */
- { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
- XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
- XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
- RCU_MODE_FIXED_SLICE_CCS_MODE))
- },
/* Disable WMTP if HW doesn't support it */
{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
@@ -461,6 +455,12 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ /* Use Fixed slice CCS mode */
+ { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
+ XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
+ XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
+ RCU_MODE_FIXED_SLICE_CCS_MODE))
+ },
};
xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index b53e8d2accdb..a440442b4d72 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -32,14 +32,61 @@ bool xe_hw_engine_timeout_in_range(u64 timeout, u64 min, u64 max)
return timeout >= min && timeout <= max;
}
-static void kobj_xe_hw_engine_release(struct kobject *kobj)
+static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
{
kfree(kobj);
}
+static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->show(kobj, kattr, buf);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->store(kobj, kattr, buf, count);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
+ .show = xe_hw_engine_class_sysfs_attr_show,
+ .store = xe_hw_engine_class_sysfs_attr_store,
+};
+
static const struct kobj_type kobj_xe_hw_engine_type = {
- .release = kobj_xe_hw_engine_release,
- .sysfs_ops = &kobj_sysfs_ops
+ .release = xe_hw_engine_sysfs_kobj_release,
+ .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
+};
+
+static const struct kobj_type kobj_xe_hw_engine_type_def = {
+ .release = xe_hw_engine_sysfs_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
};
static ssize_t job_timeout_max_store(struct kobject *kobj,
@@ -543,7 +590,7 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
if (!kobj)
return -ENOMEM;
- kobject_init(kobj, &kobj_xe_hw_engine_type);
+ kobject_init(kobj, &kobj_xe_hw_engine_type_def);
err = kobject_add(kobj, parent, "%s", ".defaults");
if (err)
goto err_object;
@@ -559,57 +606,6 @@ err_object:
return err;
}
-static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
-{
- kfree(kobj);
-}
-
-static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct xe_device *xe = kobj_to_xe(kobj);
- struct kobj_attribute *kattr;
- ssize_t ret = -EIO;
-
- kattr = container_of(attr, struct kobj_attribute, attr);
- if (kattr->show) {
- xe_pm_runtime_get(xe);
- ret = kattr->show(kobj, kattr, buf);
- xe_pm_runtime_put(xe);
- }
-
- return ret;
-}
-
-static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t count)
-{
- struct xe_device *xe = kobj_to_xe(kobj);
- struct kobj_attribute *kattr;
- ssize_t ret = -EIO;
-
- kattr = container_of(attr, struct kobj_attribute, attr);
- if (kattr->store) {
- xe_pm_runtime_get(xe);
- ret = kattr->store(kobj, kattr, buf, count);
- xe_pm_runtime_put(xe);
- }
-
- return ret;
-}
-
-static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
- .show = xe_hw_engine_class_sysfs_attr_show,
- .store = xe_hw_engine_class_sysfs_attr_store,
-};
-
-static const struct kobj_type xe_hw_engine_sysfs_kobj_type = {
- .release = xe_hw_engine_sysfs_kobj_release,
- .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
-};
static void hw_engine_class_sysfs_fini(void *arg)
{
@@ -640,7 +636,7 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
if (!kobj)
return -ENOMEM;
- kobject_init(kobj, &xe_hw_engine_sysfs_kobj_type);
+ kobject_init(kobj, &kobj_xe_hw_engine_type);
err = kobject_add(kobj, gt->sysfs, "engines");
if (err)
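The sysfs refactor above routes every attribute access through one show/store pair that brackets the callback with xe_pm_runtime_get()/put(), rather than sprinkling PM calls into each attribute. A standalone C sketch of that single-choke-point pattern (illustrative names, not the Xe code):

#include <stdio.h>

struct device { int pm_refs; };

static void pm_get(struct device *dev) { dev->pm_refs++; }
static void pm_put(struct device *dev) { dev->pm_refs--; }

typedef int (*show_fn)(struct device *dev, char *buf);

/* Every attribute read goes through here, so the device is guaranteed
 * to be awake for the duration of the callback. */
static int attr_show(struct device *dev, show_fn show, char *buf)
{
	int ret = -5;	/* stands in for -EIO when no callback is set */

	if (show) {
		pm_get(dev);
		ret = show(dev, buf);
		pm_put(dev);
	}
	return ret;
}

static int freq_show(struct device *dev, char *buf)
{
	(void)dev;
	return sprintf(buf, "%d\n", 1500);
}

int main(void)
{
	struct device dev = { 0 };
	char buf[32];

	if (attr_show(&dev, freq_show, buf) > 0)
		printf("%s", buf);
	return 0;
}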
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index df4282c71bf0..5a3e89022c38 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1177,7 +1177,7 @@ err:
err_sync:
/* Sync partial copies if any. FIXME: job_mutex? */
if (fence) {
- dma_fence_wait(m->fence, false);
+ dma_fence_wait(fence, false);
dma_fence_put(fence);
}
@@ -1547,7 +1547,7 @@ void xe_migrate_wait(struct xe_migrate *m)
static u32 pte_update_cmd_size(u64 size)
{
u32 num_dword;
- u64 entries = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
/*
@@ -1558,7 +1558,7 @@ static u32 pte_update_cmd_size(u64 size)
* 2 dword for the page table's physical location
* 2*n dword for value of pte to fill (each pte entry is 2 dwords)
*/
- num_dword = (1 + 2) * DIV_ROUND_UP(entries, 0x1ff);
+ num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, 0x1ff);
num_dword += entries * 2;
return num_dword;
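pte_update_cmd_size() switches to DIV_U64_ROUND_UP() because DIV_ROUND_UP() expands to a plain C division, and 32-bit kernels cannot divide a u64 that way (the compiler would emit a libgcc call the kernel does not provide); the _U64 variant goes through div_u64(). A standalone C sketch (illustrative, not the Xe code):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* native division */

/* Minimal stand-in for the kernel's div_u64() helper. */
static uint64_t div_u64(uint64_t n, uint32_t d)
{
	return n / d;	/* userspace links libgcc; the kernel does not */
}

#define DIV_U64_ROUND_UP(n, d)	div_u64((n) + (d) - 1, (d))

int main(void)
{
	uint64_t size = (1ULL << 33) + 1;	/* > 4 GiB: needs 64-bit math */

	printf("%llu pages\n",
	       (unsigned long long)DIV_U64_ROUND_UP(size, 4096));
	return 0;
}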
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 917fc16de866..a7582b097ae6 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -137,7 +137,8 @@ emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset,
static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
int i)
{
- u32 flags = PIPE_CONTROL_CS_STALL |
+ u32 flags0 = 0;
+ u32 flags1 = PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
@@ -148,11 +149,15 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
PIPE_CONTROL_STORE_DATA_INDEX;
if (invalidate_tlb)
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags1 |= PIPE_CONTROL_TLB_INVALIDATE;
- flags &= ~mask_flags;
+ flags1 &= ~mask_flags;
- return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
+ if (flags1 & PIPE_CONTROL_VF_CACHE_INVALIDATE)
+ flags0 |= PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE;
+
+ return emit_pipe_control(dw, i, flags0, flags1,
+ LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
}
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 3e829c87d7b4..f8c128524d9f 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -696,11 +696,14 @@ retry:
list_for_each_entry(block, blocks, link)
block->private = vr;
+ xe_bo_get(bo);
err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
&bo->devmem_allocation, ctx);
- xe_bo_unlock(bo);
if (err)
- xe_bo_put(bo); /* Creation ref */
+ xe_svm_devmem_release(&bo->devmem_allocation);
+
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
unlock:
mmap_read_unlock(mm);
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 0c738af24f7c..9b9e176992a8 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -32,8 +32,10 @@
GRAPHICS_VERSION(3001)
14022293748 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019794406 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index d525ab43a4ae..dd7d030d2e89 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -487,17 +487,6 @@ static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
/* VCMDQ Resource Helpers */
-static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
-{
- struct arm_smmu_queue *q = &vcmdq->cmdq.q;
- size_t nents = 1 << q->llq.max_n_shift;
- size_t qsz = nents << CMDQ_ENT_SZ_SHIFT;
-
- if (!q->base)
- return;
- dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma);
-}
-
static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
@@ -560,7 +549,8 @@ static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
char header[64];
- tegra241_vcmdq_free_smmu_cmdq(vcmdq);
+ /* Note that the lvcmdq queue memory space is managed by devres */
+
tegra241_vintf_deinit_lvcmdq(vintf, lidx);
dev_dbg(vintf->cmdqv->dev,
@@ -768,13 +758,13 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
if (!vintf)
- goto out_fallback;
+ return -ENOMEM;
/* Init VINTF0 for in-kernel use */
ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
if (ret) {
dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
- goto free_vintf;
+ return ret;
}
/* Preallocate logical VCMDQs to VINTF0 */
@@ -783,24 +773,12 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
if (IS_ERR(vcmdq))
- goto free_lvcmdq;
+ return PTR_ERR(vcmdq);
}
/* Now, we are ready to run all the impl ops */
smmu->impl_ops = &tegra241_cmdqv_impl_ops;
return 0;
-
-free_lvcmdq:
- for (lidx--; lidx >= 0; lidx--)
- tegra241_vintf_free_lvcmdq(vintf, lidx);
- tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
-free_vintf:
- kfree(vintf);
-out_fallback:
- dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
- smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
- tegra241_cmdqv_remove(smmu);
- return 0;
}
#ifdef CONFIG_IOMMU_DEBUGFS
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index cb7e29dcac15..a775e4dbe06f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1754,7 +1754,7 @@ static size_t cookie_msi_granule(const struct iommu_domain *domain)
return PAGE_SIZE;
default:
BUG();
- };
+ }
}
static struct list_head *cookie_msi_pages(const struct iommu_domain *domain)
@@ -1766,7 +1766,7 @@ static struct list_head *cookie_msi_pages(const struct iommu_domain *domain)
return &domain->msi_cookie->msi_page_list;
default:
BUG();
- };
+ }
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 69e23e017d9e..317266aca6e2 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -832,7 +832,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
- if (&data->domain->domain != &exynos_identity_domain) {
+ if (data->domain) {
dev_dbg(data->sysmmu, "saving state\n");
__sysmmu_disable(data);
}
@@ -850,7 +850,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
- if (&data->domain->domain != &exynos_identity_domain) {
+ if (data->domain) {
dev_dbg(data->sysmmu, "restoring state\n");
__sysmmu_enable(data);
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 6e67cc66a204..b29da2d96d0b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3835,7 +3835,6 @@ static void intel_iommu_release_device(struct device *dev)
intel_pasid_free_table(dev);
intel_iommu_debugfs_remove_dev(info);
kfree(info);
- set_dma_ops(dev, NULL);
}
static void intel_iommu_get_resv_regions(struct device *device,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index ea3ca5203919..3bc2a03cceca 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1287,43 +1287,44 @@ static struct irq_chip intel_ir_chip = {
};
/*
- * With posted MSIs, all vectors are multiplexed into a single notification
- * vector. Devices MSIs are then dispatched in a demux loop where
- * EOIs can be coalesced as well.
+ * With posted MSIs, the MSI vectors are multiplexed into a single notification
+ * vector, and only the notification vector is sent to the APIC IRR. Device
+ * MSIs are then dispatched in a demux loop that harvests the MSIs from the
+ * CPU's Posted Interrupt Request bitmap. I.e. Posted MSIs never get sent to
+ * the APIC IRR, and thus do not need an EOI. The notification handler instead
+ * performs a single EOI after processing the PIR.
*
- * "INTEL-IR-POST" IRQ chip does not do EOI on ACK, thus the dummy irq_ack()
- * function. Instead EOI is performed by the posted interrupt notification
- * handler.
+ * Note! Pending SMP/CPU affinity changes, which are per MSI, must still be
+ * honored; only the APIC EOI is omitted.
*
* For the example below, 3 MSIs are coalesced into one CPU notification. Only
- * one apic_eoi() is needed.
+ * one apic_eoi() is needed, but each MSI needs to process pending changes to
+ * its CPU affinity.
*
* __sysvec_posted_msi_notification()
* irq_enter();
* handle_edge_irq()
* irq_chip_ack_parent()
- * dummy(); // No EOI
+ * irq_move_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* handle_edge_irq()
* irq_chip_ack_parent()
- * dummy(); // No EOI
+ * irq_move_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* handle_edge_irq()
* irq_chip_ack_parent()
- * dummy(); // No EOI
+ * irq_move_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* apic_eoi()
* irq_exit()
*/
-
-static void dummy_ack(struct irq_data *d) { }
-
static struct irq_chip intel_ir_chip_post_msi = {
.name = "INTEL-IR-POST",
- .irq_ack = dummy_ack,
+ .irq_ack = irq_move_irq,
.irq_set_affinity = intel_ir_set_affinity,
.irq_compose_msi_msg = intel_ir_compose_msi_msg,
.irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index c8033ca66377..4f91a740c15f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -538,6 +538,9 @@ static void iommu_deinit_device(struct device *dev)
dev->iommu_group = NULL;
module_put(ops->owner);
dev_iommu_free(dev);
+#ifdef CONFIG_IOMMU_DMA
+ dev->dma_iommu = false;
+#endif
}
static struct iommu_domain *pasid_array_entry_to_domain(void *entry)
@@ -2717,7 +2720,8 @@ int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
* if upper layers showed interest and installed a fault handler,
* invoke it.
*/
- if (domain->handler)
+ if (domain->cookie_type == IOMMU_COOKIE_FAULT_HANDLER &&
+ domain->handler)
ret = domain->handler(domain, dev, iova, flags,
domain->handler_token);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 074daf1aac4e..e424b279a8cd 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1081,31 +1081,24 @@ static int ipmmu_probe(struct platform_device *pdev)
}
}
+ platform_set_drvdata(pdev, mmu);
/*
* Register the IPMMU to the IOMMU subsystem in the following cases:
* - R-Car Gen2 IPMMU (all devices registered)
* - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
*/
- if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
- ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
- dev_name(&pdev->dev));
- if (ret)
- return ret;
-
- ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
- if (ret)
- return ret;
- }
+ if (mmu->features->has_cache_leaf_nodes && ipmmu_is_root(mmu))
+ return 0;
- /*
- * We can't create the ARM mapping here as it requires the bus to have
- * an IOMMU, which only happens when bus_set_iommu() is called in
- * ipmmu_init() after the probe function returns.
- */
+ ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, dev_name(&pdev->dev));
+ if (ret)
+ return ret;
- platform_set_drvdata(pdev, mmu);
+ ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
+ if (ret)
+ iommu_device_sysfs_remove(&mmu->iommu);
- return 0;
+ return ret;
}
static void ipmmu_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 034b0e670384..df98d0c65f54 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1372,15 +1372,6 @@ static int mtk_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
mutex_init(&data->mutex);
- ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
- "mtk-iommu.%pa", &ioaddr);
- if (ret)
- goto out_link_remove;
-
- ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
- if (ret)
- goto out_sysfs_remove;
-
if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) {
list_add_tail(&data->list, data->plat_data->hw_list);
data->hw_list = data->plat_data->hw_list;
@@ -1390,19 +1381,28 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->hw_list = &data->hw_list_head;
}
+ ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
+ "mtk-iommu.%pa", &ioaddr);
+ if (ret)
+ goto out_list_del;
+
+ ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
+ if (ret)
+ goto out_sysfs_remove;
+
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
if (ret)
- goto out_list_del;
+ goto out_device_unregister;
}
return ret;
-out_list_del:
- list_del(&data->list);
+out_device_unregister:
iommu_device_unregister(&data->iommu);
out_sysfs_remove:
iommu_device_sysfs_remove(&data->iommu);
-out_link_remove:
+out_list_del:
+ list_del(&data->list);
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
device_link_remove(data->smicomm_dev, dev);
out_runtime_disable:
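
The reordering above also restores the usual probe convention: error labels unwind the setup steps in exact reverse order, so a failure at step N undoes steps N-1..1 and nothing else. A minimal sketch of the pattern (setup_*/teardown_* are hypothetical placeholders, not mtk_iommu functions):

    /* Mirror-order goto ladder: each label undoes only what succeeded. */
    static int probe_sketch(struct device *dev)
    {
            int ret;

            ret = setup_a(dev);     /* cf. list_add_tail() above */
            if (ret)
                    return ret;

            ret = setup_b(dev);     /* cf. iommu_device_sysfs_add() */
            if (ret)
                    goto undo_a;

            ret = setup_c(dev);     /* cf. iommu_device_register() */
            if (ret)
                    goto undo_b;

            return 0;

    undo_b:
            teardown_b(dev);
    undo_a:
            teardown_a(dev);
            return ret;
    }
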
diff --git a/drivers/irqchip/irq-bcm2712-mip.c b/drivers/irqchip/irq-bcm2712-mip.c
index 49a19db2d1e1..4cce24233f0f 100644
--- a/drivers/irqchip/irq-bcm2712-mip.c
+++ b/drivers/irqchip/irq-bcm2712-mip.c
@@ -163,6 +163,7 @@ static const struct irq_domain_ops mip_middle_domain_ops = {
static const struct msi_parent_ops mip_msi_parent_ops = {
.supported_flags = MIP_MSI_FLAGS_SUPPORTED,
.required_flags = MIP_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PCI_MSI,
.prefix = "MIP-MSI-",
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
index ee682e87eb8b..375b55aa0acd 100644
--- a/drivers/irqchip/irq-sg2042-msi.c
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -151,6 +151,7 @@ static const struct irq_domain_ops sg2042_msi_middle_domain_ops = {
static const struct msi_parent_ops sg2042_msi_parent_ops = {
.required_flags = SG2042_MSI_FLAGS_REQUIRED,
.supported_flags = SG2042_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PCI_MSI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.prefix = "SG2042-",
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 06f809e70f15..ddb37f6670de 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -139,7 +139,7 @@ config MD_RAID456
tristate "RAID-4/RAID-5/RAID-6 mode"
depends on BLK_DEV_MD
select RAID6_PQ
- select LIBCRC32C
+ select CRC32
select ASYNC_MEMCPY
select ASYNC_XOR
select ASYNC_PQ
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index f4f948b0e173..dbb97a7233ab 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -2,7 +2,7 @@
config DM_PERSISTENT_DATA
tristate
depends on BLK_DEV_DM
- select LIBCRC32C
+ select CRC32
select DM_BUFIO
help
Library providing immutable on-disk data structure support for
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 9739387cff8c..58c6e1743f5c 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -482,10 +482,11 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
silly = MAX_LOOPS;
while (thisEUN <= inftl->lastEUN) {
- inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
- blockofs, 8, &retlen, (char *)&bci);
-
- status = bci.Status | bci.Status1;
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
pr_debug("INFTL: status of block %d in EUN %d is %x\n",
block , writeEUN, status);
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index db516a45f0c5..44913ff1bf12 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -3,11 +3,8 @@
nandcore-objs := core.o bbt.o
obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
-ifeq ($(CONFIG_SPI_QPIC_SNAND),y)
obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
-else
obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
-endif
obj-y += onenand/
obj-y += raw/
obj-y += spi/
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index b07c2f8b4035..918974d088cf 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -387,6 +387,9 @@ static int r852_wait(struct nand_chip *chip)
static int r852_ready(struct nand_chip *chip)
{
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ if (dev->card_unstable)
+ return 0;
+
return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index eeec8bf17cf4..1bd4313215d7 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -143,7 +143,7 @@ config BNX2X
depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
select ZLIB_INFLATE
- select LIBCRC32C
+ select CRC32
select MDIO
help
This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
@@ -207,7 +207,7 @@ config BNXT
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
- select LIBCRC32C
+ select CRC32
select NET_DEVLINK
select PAGE_POOL
select DIMLIB
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index ca742cc146d7..7dae5aad3689 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -70,8 +70,8 @@ config LIQUIDIO
depends on 64BIT && PCI
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select FW_LOADER
- select LIBCRC32C
select LIQUIDIO_CORE
select NET_DEVLINK
help
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 0f844c14485a..35acc07bd964 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -165,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
} else if (level == NIX_TXSCH_LVL_TL2) {
+ /* configure parent txschq */
+ cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq);
+ cfg->regval[num_regs] = (u64)hw->tx_link << 16;
+ num_regs++;
+
/* configure link cfg */
if (level == pfvf->qos.link_cfg_lvl) {
cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 00b0b318df27..e69eaa65e0de 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -310,7 +310,8 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
return true;
page = page_pool_dev_alloc_pages(rx_ring->page_pool);
- WARN_ON(!page);
+ if (unlikely(!page))
+ return false;
dma = page_pool_get_dma_addr(page);
bi->page_dma = dma;
@@ -546,7 +547,8 @@ static void wx_rx_checksum(struct wx_ring *ring,
return;
/* Hardware can't guarantee csum if IPv6 Dest Header found */
- if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc))
+ if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP &&
+ wx_test_staterr(rx_desc, WX_RXD_STAT_IPV6EX))
return;
/* if L4 checksum error */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 5b230ecbbabb..4c545b2aa997 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -513,6 +513,7 @@ enum WX_MSCA_CMD_value {
#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */
#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */
#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/
+#define WX_RXD_STAT_IPV6EX BIT(12) /* IPv6 Dest Header */
#define WX_RXD_STAT_TS BIT(14) /* IEEE1588 Time Stamp */
#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */
@@ -589,8 +590,6 @@ enum wx_l2_ptypes {
#define WX_RXD_PKTTYPE(_rxd) \
((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
-#define WX_RXD_IPV6EX(_rxd) \
- ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1)
/*********************** Transmit Descriptor Config Masks ****************/
#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */
#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 675fbd225378..cc1bfd22fb81 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -244,6 +244,46 @@ static bool phy_drv_wol_enabled(struct phy_device *phydev)
return wol.wolopts != 0;
}
+static void phy_link_change(struct phy_device *phydev, bool up)
+{
+ struct net_device *netdev = phydev->attached_dev;
+
+ if (up)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
+ phydev->adjust_link(netdev);
+ if (phydev->mii_ts && phydev->mii_ts->link_state)
+ phydev->mii_ts->link_state(phydev->mii_ts, phydev);
+}
+
+/**
+ * phy_uses_state_machine - test whether consumer driver uses PAL state machine
+ * @phydev: the target PHY device structure
+ *
+ * Ultimately, this aims to indirectly determine whether the PHY is attached
+ * to a consumer which uses the state machine by calling phy_start() and
+ * phy_stop().
+ *
+ * When the PHY driver consumer uses phylib, it must have previously called
+ * phy_connect_direct() or one of its derivatives, so that phy_prepare_link()
+ * has set up a hook for monitoring state changes.
+ *
+ * When the PHY driver is used by the MAC driver consumer through phylink (the
+ * only other provider of a phy_link_change() method), using the PHY state
+ * machine is not optional.
+ *
+ * Return: true if consumer calls phy_start() and phy_stop(), false otherwise.
+ */
+static bool phy_uses_state_machine(struct phy_device *phydev)
+{
+ if (phydev->phy_link_change == phy_link_change)
+ return phydev->attached_dev && phydev->adjust_link;
+
+ /* phydev->phy_link_change is implicitly phylink_phy_change() */
+ return true;
+}
+
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->mdio.dev.driver;
@@ -310,7 +350,7 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
* may call phy routines that try to grab the same lock, and that may
* lead to a deadlock.
*/
- if (phydev->attached_dev && phydev->adjust_link)
+ if (phy_uses_state_machine(phydev))
phy_stop_machine(phydev);
if (!mdio_bus_phy_may_suspend(phydev))
@@ -364,7 +404,7 @@ no_resume:
}
}
- if (phydev->attached_dev && phydev->adjust_link)
+ if (phy_uses_state_machine(phydev))
phy_start_machine(phydev);
return 0;
@@ -1055,19 +1095,6 @@ struct phy_device *phy_find_first(struct mii_bus *bus)
}
EXPORT_SYMBOL(phy_find_first);
-static void phy_link_change(struct phy_device *phydev, bool up)
-{
- struct net_device *netdev = phydev->attached_dev;
-
- if (up)
- netif_carrier_on(netdev);
- else
- netif_carrier_off(netdev);
- phydev->adjust_link(netdev);
- if (phydev->mii_ts && phydev->mii_ts->link_state)
- phydev->mii_ts->link_state(phydev->mii_ts, phydev);
-}
-
/**
* phy_prepare_link - prepares the PHY layer to monitor link status
* @phydev: target phy_device struct
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 644e99fc3623..9c4932198931 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -506,6 +506,11 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
unsigned char *data;
int islcp;
+ /* Ensure we can safely access protocol field and LCP code */
+ if (!pskb_may_pull(skb, 3)) {
+ kfree_skb(skb);
+ return NULL;
+ }
data = skb->data;
proto = get_unaligned_be16(data);
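
pskb_may_pull() guarantees that at least the requested number of bytes sit in the skb's linear area before they are dereferenced; without it, the protocol/LCP-code reads below could walk off a fragmented skb. A self-contained sketch of the check (same constants as the patched function; the helper name is hypothetical):

    #include <linux/ppp_defs.h>
    #include <linux/skbuff.h>
    #include <linux/unaligned.h>

    /* Sketch: validate linear headroom before parsing fixed fields. */
    static bool is_lcp_ctrl_frame(struct sk_buff *skb)
    {
            u16 proto;

            /* 2 bytes of protocol plus 1 byte of LCP code must be linear. */
            if (!pskb_may_pull(skb, 3))
                    return false;

            proto = get_unaligned_be16(skb->data);
            return proto == PPP_LCP &&
                   skb->data[2] >= 1 && skb->data[2] <= 7;
    }
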
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cc23035148b4..b502ac07483b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4295,6 +4295,15 @@ static void nvme_scan_work(struct work_struct *work)
nvme_scan_ns_sequential(ctrl);
}
mutex_unlock(&ctrl->scan_lock);
+
+ /* Requeue if we have missed AENs */
+ if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
+ nvme_queue_scan(ctrl);
+#ifdef CONFIG_NVME_MULTIPATH
+ else
+ /* Re-read the ANA log page to not miss updates */
+ queue_work(nvme_wq, &ctrl->ana_work);
+#endif
}
/*
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 89be5911b25d..05eccd96d34a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -427,7 +427,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
struct nvme_ns *ns;
if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
- return NULL;
+ return false;
list_for_each_entry_srcu(ns, &head->list, siblings,
srcu_read_lock_held(&head->srcu)) {
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 26c459f0198d..72d260201d8c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1803,6 +1803,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
ret = PTR_ERR(sock_file);
goto err_destroy_mutex;
}
+
+ sk_net_refcnt_upgrade(queue->sock->sk);
nvme_tcp_reclassify_socket(queue->sock);
/* Single syn retry */
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 7318b736d414..7b50130f10f6 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -995,16 +995,6 @@ nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
return kref_get_unless_zero(&hostport->ref);
}
-static void
-nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
-{
- /* if LLDD not implemented, leave as NULL */
- if (!hostport || !hostport->hosthandle)
- return;
-
- nvmet_fc_hostport_put(hostport);
-}
-
static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
@@ -1028,33 +1018,24 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
struct nvmet_fc_hostport *newhost, *match = NULL;
unsigned long flags;
+ /*
+ * Caller holds a reference on tgtport.
+ */
+
/* if LLDD not implemented, leave as NULL */
if (!hosthandle)
return NULL;
- /*
- * take reference for what will be the newly allocated hostport if
- * we end up using a new allocation
- */
- if (!nvmet_fc_tgtport_get(tgtport))
- return ERR_PTR(-EINVAL);
-
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
spin_unlock_irqrestore(&tgtport->lock, flags);
- if (match) {
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
+ if (match)
return match;
- }
newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
- if (!newhost) {
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
+ if (!newhost)
return ERR_PTR(-ENOMEM);
- }
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
@@ -1063,6 +1044,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
kfree(newhost);
newhost = match;
} else {
+ nvmet_fc_tgtport_get(tgtport);
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
INIT_LIST_HEAD(&newhost->host_list);
@@ -1076,20 +1058,14 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
static void
-nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
-{
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
-}
-
-static void
nvmet_fc_delete_assoc_work(struct work_struct *work)
{
struct nvmet_fc_tgt_assoc *assoc =
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- nvmet_fc_delete_assoc(assoc);
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
nvmet_fc_tgtport_put(tgtport);
}
@@ -1097,7 +1073,8 @@ static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
nvmet_fc_tgtport_get(assoc->tgtport);
- queue_work(nvmet_wq, &assoc->del_work);
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ nvmet_fc_tgtport_put(assoc->tgtport);
}
static bool
@@ -1143,6 +1120,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
goto out_ida;
assoc->tgtport = tgtport;
+ nvmet_fc_tgtport_get(tgtport);
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
@@ -1190,7 +1168,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
/* Send Disconnect now that all i/o has completed */
nvmet_fc_xmt_disconnect_assoc(assoc);
- nvmet_fc_free_hostport(assoc->hostport);
+ nvmet_fc_hostport_put(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
@@ -1244,6 +1222,8 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
+
+ nvmet_fc_tgtport_put(tgtport);
}
static struct nvmet_fc_tgt_assoc *
@@ -1455,11 +1435,6 @@ nvmet_fc_free_tgtport(struct kref *ref)
struct nvmet_fc_tgtport *tgtport =
container_of(ref, struct nvmet_fc_tgtport, ref);
struct device *dev = tgtport->dev;
- unsigned long flags;
-
- spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
- list_del(&tgtport->tgt_list);
- spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
nvmet_fc_free_ls_iodlist(tgtport);
@@ -1620,6 +1595,11 @@ int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_del(&tgtport->tgt_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
nvmet_fc_portentry_unbind_tgt(tgtport);
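
All of the hostport/association changes above enforce one ownership rule: the object that stores a tgtport pointer takes its own reference when the pointer is set and drops it where the pointer dies (note also the queue_work() idiom above: take the reference before queueing, drop it if the work was already pending). A generic sketch of that rule, with hypothetical parent/child names:

    #include <linux/err.h>
    #include <linux/slab.h>

    /* Sketch: a stored pointer owns exactly one reference. */
    static struct child *child_create(struct parent *p)
    {
            struct child *c = kzalloc(sizeof(*c), GFP_KERNEL);

            if (!c)
                    return ERR_PTR(-ENOMEM);

            parent_get(p);          /* ref owned by c->parent ... */
            c->parent = p;
            return c;
    }

    static void child_destroy(struct child *c)
    {
            parent_put(c->parent);  /* ... dropped when the pointer dies */
            kfree(c);
    }
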
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index e1abb27927ff..641201e62c1b 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -208,6 +208,7 @@ struct fcloop_lport {
struct nvme_fc_local_port *localport;
struct list_head lport_list;
struct completion unreg_done;
+ refcount_t ref;
};
struct fcloop_lport_priv {
@@ -239,7 +240,7 @@ struct fcloop_nport {
struct fcloop_tport *tport;
struct fcloop_lport *lport;
struct list_head nport_list;
- struct kref ref;
+ refcount_t ref;
u64 node_name;
u64 port_name;
u32 port_role;
@@ -274,7 +275,7 @@ struct fcloop_fcpreq {
u32 inistate;
bool active;
bool aborted;
- struct kref ref;
+ refcount_t ref;
struct work_struct fcp_rcv_work;
struct work_struct abort_rcv_work;
struct work_struct tio_done_work;
@@ -478,7 +479,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
if (targetport) {
tport = targetport->private;
spin_lock(&tport->lock);
- list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
spin_unlock(&tport->lock);
queue_work(nvmet_wq, &tport->ls_work);
}
@@ -534,24 +535,18 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
}
static void
-fcloop_tfcp_req_free(struct kref *ref)
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
- struct fcloop_fcpreq *tfcp_req =
- container_of(ref, struct fcloop_fcpreq, ref);
+ if (!refcount_dec_and_test(&tfcp_req->ref))
+ return;
kfree(tfcp_req);
}
-static void
-fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
-{
- kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
-}
-
static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
- return kref_get_unless_zero(&tfcp_req->ref);
+ return refcount_inc_not_zero(&tfcp_req->ref);
}
static void
@@ -748,7 +743,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
- kref_init(&tfcp_req->ref);
+ refcount_set(&tfcp_req->ref, 1);
queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
@@ -1001,24 +996,39 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
}
static void
-fcloop_nport_free(struct kref *ref)
+fcloop_lport_put(struct fcloop_lport *lport)
{
- struct fcloop_nport *nport =
- container_of(ref, struct fcloop_nport, ref);
+ unsigned long flags;
- kfree(nport);
+ if (!refcount_dec_and_test(&lport->ref))
+ return;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_del(&lport->lport_list);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(lport);
+}
+
+static int
+fcloop_lport_get(struct fcloop_lport *lport)
+{
+ return refcount_inc_not_zero(&lport->ref);
}
static void
fcloop_nport_put(struct fcloop_nport *nport)
{
- kref_put(&nport->ref, fcloop_nport_free);
+ if (!refcount_dec_and_test(&nport->ref))
+ return;
+
+ kfree(nport);
}
static int
fcloop_nport_get(struct fcloop_nport *nport)
{
- return kref_get_unless_zero(&nport->ref);
+ return refcount_inc_not_zero(&nport->ref);
}
static void
@@ -1029,6 +1039,8 @@ fcloop_localport_delete(struct nvme_fc_local_port *localport)
/* release any threads waiting for the unreg to complete */
complete(&lport->unreg_done);
+
+ fcloop_lport_put(lport);
}
static void
@@ -1140,6 +1152,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
lport->localport = localport;
INIT_LIST_HEAD(&lport->lport_list);
+ refcount_set(&lport->ref, 1);
spin_lock_irqsave(&fcloop_lock, flags);
list_add_tail(&lport->lport_list, &fcloop_lports);
@@ -1156,13 +1169,6 @@ out_free_lport:
return ret ? ret : count;
}
-
-static void
-__unlink_local_port(struct fcloop_lport *lport)
-{
- list_del(&lport->lport_list);
-}
-
static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
@@ -1175,8 +1181,6 @@ __wait_localport_unreg(struct fcloop_lport *lport)
if (!ret)
wait_for_completion(&lport->unreg_done);
- kfree(lport);
-
return ret;
}
@@ -1199,8 +1203,9 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
list_for_each_entry(tlport, &fcloop_lports, lport_list) {
if (tlport->localport->node_name == nodename &&
tlport->localport->port_name == portname) {
+ if (!fcloop_lport_get(tlport))
+ break;
lport = tlport;
- __unlink_local_port(lport);
break;
}
}
@@ -1210,6 +1215,7 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
return -ENOENT;
ret = __wait_localport_unreg(lport);
+ fcloop_lport_put(lport);
return ret ? ret : count;
}
@@ -1249,7 +1255,7 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
newnport->port_role = opts->roles;
if (opts->mask & NVMF_OPT_FCADDR)
newnport->port_id = opts->fcaddr;
- kref_init(&newnport->ref);
+ refcount_set(&newnport->ref, 1);
spin_lock_irqsave(&fcloop_lock, flags);
@@ -1637,17 +1643,17 @@ static void __exit fcloop_exit(void)
for (;;) {
lport = list_first_entry_or_null(&fcloop_lports,
typeof(*lport), lport_list);
- if (!lport)
+ if (!lport || !fcloop_lport_get(lport))
break;
- __unlink_local_port(lport);
-
spin_unlock_irqrestore(&fcloop_lock, flags);
ret = __wait_localport_unreg(lport);
if (ret)
pr_warn("%s: Failed deleting local port\n", __func__);
+ fcloop_lport_put(lport);
+
spin_lock_irqsave(&fcloop_lock, flags);
}
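
The fcloop conversion above is the stock kref-to-refcount_t transformation: the release callback disappears and the final-put work moves inline behind refcount_dec_and_test(). Side by side (sketch; the obj types are placeholders):

    #include <linux/kref.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct obj_k { struct kref ref; };    /* init: kref_init(&o->ref) */
    struct obj_r { refcount_t ref; };     /* init: refcount_set(&o->ref, 1) */

    /* Before: kref, release callback fires on the final put. */
    static void obj_free(struct kref *ref)
    {
            kfree(container_of(ref, struct obj_k, ref));
    }

    static void obj_put_kref(struct obj_k *o)
    {
            kref_put(&o->ref, obj_free);
    }

    /* After: refcount_t, final-put path open-coded as in fcloop above. */
    static void obj_put(struct obj_r *o)
    {
            if (!refcount_dec_and_test(&o->ref))
                    return;
            kfree(o);
    }

Gets translate one-to-one as well: kref_get_unless_zero() becomes refcount_inc_not_zero().
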
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8d610c17e0f2..94daca15a096 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1990,12 +1990,12 @@ static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
device_create_managed_software_node(&pdev->dev, properties, NULL))
pci_warn(pdev, "could not add stall property");
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
/*
* It's possible for the MSI to get corrupted if SHPC and ACPI are used
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index a40c511e0096..0387bd838487 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -322,7 +322,7 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
const struct pwm_ops *ops = chip->ops;
char wfhw[WFHWSIZE];
struct pwm_waveform wf_rounded;
- int err;
+ int err, ret_tohw;
BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
@@ -332,16 +332,16 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
if (!pwm_wf_valid(wf))
return -EINVAL;
- err = __pwm_round_waveform_tohw(chip, pwm, wf, &wfhw);
- if (err)
- return err;
+ ret_tohw = __pwm_round_waveform_tohw(chip, pwm, wf, &wfhw);
+ if (ret_tohw < 0)
+ return ret_tohw;
if ((IS_ENABLED(CONFIG_PWM_DEBUG) || exact) && wf->period_length_ns) {
err = __pwm_round_waveform_fromhw(chip, pwm, &wfhw, &wf_rounded);
if (err)
return err;
- if (IS_ENABLED(CONFIG_PWM_DEBUG) && !pwm_check_rounding(wf, &wf_rounded))
+ if (IS_ENABLED(CONFIG_PWM_DEBUG) && ret_tohw == 0 && !pwm_check_rounding(wf, &wf_rounded))
dev_err(&chip->dev, "Wrong rounding: requested %llu/%llu [+%llu], result %llu/%llu [+%llu]\n",
wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns);
@@ -382,7 +382,8 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns,
wf_set.duty_length_ns, wf_set.period_length_ns, wf_set.duty_offset_ns);
}
- return 0;
+
+ return ret_tohw;
}
/**
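
Taken together with the driver changes below, this establishes the return convention for .round_waveform_tohw(): negative errno on failure, 0 when the request was rounded in the allowed (downward) direction, and 1 when the hardware had to round up, with __pwm_set_waveform() now forwarding that value to its caller. A driver callback then looks roughly like this (sketch; struct hw_waveform and ns_to_cycles() are hypothetical):

    /* Sketch of the tohw convention (cf. axi-pwmgen below). */
    static int round_waveform_tohw_sketch(struct pwm_chip *chip,
                                          struct pwm_device *pwm,
                                          const struct pwm_waveform *wf,
                                          void *_wfhw)
    {
            struct hw_waveform *wfhw = _wfhw;       /* hypothetical */

            wfhw->period_cnt = ns_to_cycles(wf->period_length_ns);
            if (wfhw->period_cnt == 0) {
                    /*
                     * Requested period is shorter than one hardware cycle:
                     * round the period *up* to the minimum and say so by
                     * returning 1 (this also suppresses the PWM_DEBUG
                     * rounding check, which only applies to return 0).
                     */
                    wfhw->period_cnt = 1;
                    wfhw->duty_cycle_cnt = 0;
                    return 1;
            }

            wfhw->duty_cycle_cnt = ns_to_cycles(wf->duty_length_ns);
            return 0;       /* rounded down, or exact */
    }
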
diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c
index 4259a0db9ff4..4337c8f5acf0 100644
--- a/drivers/pwm/pwm-axi-pwmgen.c
+++ b/drivers/pwm/pwm-axi-pwmgen.c
@@ -75,6 +75,7 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
{
struct axi_pwmgen_waveform *wfhw = _wfhw;
struct axi_pwmgen_ddata *ddata = axi_pwmgen_ddata_from_chip(chip);
+ int ret = 0;
if (wf->period_length_ns == 0) {
*wfhw = (struct axi_pwmgen_waveform){
@@ -91,12 +92,15 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
if (wfhw->period_cnt == 0) {
/*
* The specified period is too short for the hardware.
- * Let's round .duty_cycle down to 0 to get a (somewhat)
- * valid result.
+ * So round up .period_cnt to 1 (i.e. the smallest
+ * possible period). With .duty_cycle and .duty_offset
+ * being less than or equal to .period, their rounded
+ * value must be 0.
*/
wfhw->period_cnt = 1;
wfhw->duty_cycle_cnt = 0;
wfhw->duty_offset_cnt = 0;
+ ret = 1;
} else {
wfhw->duty_cycle_cnt = min_t(u64,
mul_u64_u32_div(wf->duty_length_ns, ddata->clk_rate_hz, NSEC_PER_SEC),
@@ -111,7 +115,7 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
pwm->hwpwm, wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
ddata->clk_rate_hz, wfhw->period_cnt, wfhw->duty_cycle_cnt, wfhw->duty_offset_cnt);
- return 0;
+ return ret;
}
static int axi_pwmgen_round_waveform_fromhw(struct pwm_chip *chip, struct pwm_device *pwm,
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 2510c10ca473..c45a5fca4cbb 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -118,6 +118,9 @@ static unsigned int fsl_pwm_ticks_to_ns(struct fsl_pwm_chip *fpc,
unsigned long long exval;
rate = clk_get_rate(fpc->clk[fpc->period.clk_select]);
+ if (rate >> fpc->period.clk_ps == 0)
+ return 0;
+
exval = ticks;
exval *= 1000000000UL;
do_div(exval, rate >> fpc->period.clk_ps);
@@ -190,6 +193,9 @@ static unsigned int fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc,
unsigned int period = fpc->period.mod_period + 1;
unsigned int period_ns = fsl_pwm_ticks_to_ns(fpc, period);
+ if (!period_ns)
+ return 0;
+
duty = (unsigned long long)duty_ns * period;
do_div(duty, period_ns);
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 01dfa0fab80a..7eaab5831499 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -121,21 +121,25 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH,
reg_thres = PWMTHRES;
+ unsigned long clk_rate;
u64 resolution;
int ret;
ret = pwm_mediatek_clk_enable(chip, pwm);
-
if (ret < 0)
return ret;
+ clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]);
+ if (!clk_rate)
+ return -EINVAL;
+
/* Make sure we use the bus clock and not the 26MHz clock */
if (pc->soc->has_ck_26m_sel)
writel(0, pc->regs + PWM_CK_26M_SEL);
/* Using resolution in picosecond gets accuracy higher */
resolution = (u64)NSEC_PER_SEC * 1000;
- do_div(resolution, clk_get_rate(pc->clk_pwms[pwm->hwpwm]));
+ do_div(resolution, clk_rate);
cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
while (cnt_period > 8191) {
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 2261789cc27d..578dbdd2d5a7 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -8,6 +8,7 @@
* - The hardware cannot generate a 0% duty cycle.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -102,23 +103,24 @@ static void rcar_pwm_set_clock_control(struct rcar_pwm_chip *rp,
rcar_pwm_write(rp, value, RCAR_PWMCR);
}
-static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns,
- int period_ns)
+static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, u64 duty_ns,
+ u64 period_ns)
{
- unsigned long long one_cycle, tmp; /* 0.01 nanoseconds */
+ unsigned long long tmp;
unsigned long clk_rate = clk_get_rate(rp->clk);
u32 cyc, ph;
- one_cycle = NSEC_PER_SEC * 100ULL << div;
- do_div(one_cycle, clk_rate);
+ /* div <= 24 == RCAR_PWM_MAX_DIVISION, so the shift doesn't overflow. */
+ tmp = mul_u64_u64_div_u64(period_ns, clk_rate, (u64)NSEC_PER_SEC << div);
+ if (tmp > FIELD_MAX(RCAR_PWMCNT_CYC0_MASK))
+ tmp = FIELD_MAX(RCAR_PWMCNT_CYC0_MASK);
- tmp = period_ns * 100ULL;
- do_div(tmp, one_cycle);
- cyc = (tmp << RCAR_PWMCNT_CYC0_SHIFT) & RCAR_PWMCNT_CYC0_MASK;
+ cyc = FIELD_PREP(RCAR_PWMCNT_CYC0_MASK, tmp);
- tmp = duty_ns * 100ULL;
- do_div(tmp, one_cycle);
- ph = tmp & RCAR_PWMCNT_PH0_MASK;
+ tmp = mul_u64_u64_div_u64(duty_ns, clk_rate, (u64)NSEC_PER_SEC << div);
+ if (tmp > FIELD_MAX(RCAR_PWMCNT_PH0_MASK))
+ tmp = FIELD_MAX(RCAR_PWMCNT_PH0_MASK);
+ ph = FIELD_PREP(RCAR_PWMCNT_PH0_MASK, tmp);
/* Avoid prohibited setting */
if (cyc == 0 || ph == 0)
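
The rewrite replaces the intermediate 0.01 ns unit with a single 128-bit-safe mul_u64_u64_div_u64() and uses FIELD_MAX()/FIELD_PREP() to clamp and place the counter. The pattern in isolation (sketch; REG_CYC_MASK is a hypothetical field, the real masks are RCAR_PWMCNT_CYC0_MASK and RCAR_PWMCNT_PH0_MASK):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/math64.h>
    #include <linux/time64.h>

    #define REG_CYC_MASK GENMASK(29, 16)    /* hypothetical register field */

    static u32 ns_to_reg_field(u64 ns, unsigned long rate, int div)
    {
            /* counts = ns * rate / (NSEC_PER_SEC << div), overflow-safe */
            u64 cnt = mul_u64_u64_div_u64(ns, rate, (u64)NSEC_PER_SEC << div);

            /* Clamp to the field's capacity, then shift into position. */
            if (cnt > FIELD_MAX(REG_CYC_MASK))
                    cnt = FIELD_MAX(REG_CYC_MASK);

            return FIELD_PREP(REG_CYC_MASK, cnt);
    }
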
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index a59de4de18b6..ec2c05c9ee7a 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -103,22 +103,16 @@ static int stm32_pwm_round_waveform_tohw(struct pwm_chip *chip,
if (ret)
goto out;
- /*
- * calculate the best value for ARR for the given PSC, refuse if
- * the resulting period gets bigger than the requested one.
- */
arr = mul_u64_u64_div_u64(wf->period_length_ns, rate,
(u64)NSEC_PER_SEC * (wfhw->psc + 1));
if (arr <= wfhw->arr) {
/*
- * requested period is small than the currently
+ * requested period is smaller than the currently
 * configured and unchangeable period, report back the smallest
- * possible period, i.e. the current state; Initialize
- * ccr to anything valid.
+ * possible period, i.e. the current state and return 1
+ * to indicate the wrong rounding direction.
*/
- wfhw->ccr = 0;
ret = 1;
- goto out;
}
} else {
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 21fa7ac849e5..4904b831c0a7 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -302,11 +302,17 @@ static struct airq_info *new_airq_info(int index)
static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
u64 *first, void **airq_info)
{
- int i, j;
+ int i, j, queue_idx, highest_queue_idx = -1;
struct airq_info *info;
unsigned long *indicator_addr = NULL;
unsigned long bit, flags;
+ /* Array entries without an actual queue pointer must be ignored. */
+ for (i = 0; i < nvqs; i++) {
+ if (vqs[i])
+ highest_queue_idx++;
+ }
+
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
mutex_lock(&airq_areas_lock);
if (!airq_areas[i])
@@ -316,7 +322,7 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
if (!info)
return NULL;
write_lock_irqsave(&info->lock, flags);
- bit = airq_iv_alloc(info->aiv, nvqs);
+ bit = airq_iv_alloc(info->aiv, highest_queue_idx + 1);
if (bit == -1UL) {
/* Not enough vacancies. */
write_unlock_irqrestore(&info->lock, flags);
@@ -325,8 +331,10 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
*first = bit;
*airq_info = info;
indicator_addr = info->aiv->vector;
- for (j = 0; j < nvqs; j++) {
- airq_iv_set_ptr(info->aiv, bit + j,
+ for (j = 0, queue_idx = 0; j < nvqs; j++) {
+ if (!vqs[j])
+ continue;
+ airq_iv_set_ptr(info->aiv, bit + queue_idx++,
(unsigned long)vqs[j]);
}
write_unlock_irqrestore(&info->lock, flags);
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 5c59fddb32c1..b5ecffcaf795 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -949,24 +949,20 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q);
if (ret)
- goto err_destroy_mutex;
+ goto err_put_ctrl;
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
- goto err_destroy_mutex;
+ goto err_put_ctrl;
return 0;
-err_destroy_mutex:
- mutex_destroy(&q->lock);
-
err_disable_clk:
fsl_qspi_clk_disable_unprep(q);
err_put_ctrl:
spi_controller_put(ctlr);
- dev_err(dev, "Freescale QuadSPI probe failed\n");
return ret;
}
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index f7d6f47971fd..24f485827e03 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -278,7 +278,7 @@ config XEN_PRIVCMD_EVENTFD
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
- depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
+ depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
This ACPI processor uploads Power Management information to the Xen
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 65d4e7fa1eb8..8c852807ba1c 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -679,7 +679,7 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(xen_free_ballooned_pages);
-static void __init balloon_add_regions(void)
+static int __init balloon_add_regions(void)
{
unsigned long start_pfn, pages;
unsigned long pfn, extra_pfn_end;
@@ -702,26 +702,38 @@ static void __init balloon_add_regions(void)
for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
balloon_append(pfn_to_page(pfn));
- balloon_stats.total_pages += extra_pfn_end - start_pfn;
+ /*
+ * Extra regions are accounted for in the physmap, but must be
+ * subtracted from current_pages to balloon down the initial
+ * allocation, because they are already included in total_pages.
+ */
+ if (extra_pfn_end - start_pfn >= balloon_stats.current_pages) {
+ WARN(1, "Extra pages underflow current target");
+ return -ERANGE;
+ }
+ balloon_stats.current_pages -= extra_pfn_end - start_pfn;
}
+
+ return 0;
}
static int __init balloon_init(void)
{
struct task_struct *task;
+ int rc;
if (!xen_domain())
return -ENODEV;
pr_info("Initialising balloon driver\n");
-#ifdef CONFIG_XEN_PV
- balloon_stats.current_pages = xen_pv_domain()
- ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
- : get_num_physpages();
-#else
- balloon_stats.current_pages = get_num_physpages();
-#endif
+ if (xen_released_pages >= get_num_physpages()) {
+ WARN(1, "Released pages underflow current target");
+ return -ERANGE;
+ }
+
+ balloon_stats.current_pages = get_num_physpages() - xen_released_pages;
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
@@ -738,7 +750,9 @@ static int __init balloon_init(void)
register_sysctl_init("xen/balloon", balloon_table);
#endif
- balloon_add_regions();
+ rc = balloon_add_regions();
+ if (rc)
+ return rc;
task = kthread_run(balloon_thread, NULL, "xen-balloon");
if (IS_ERR(task)) {
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index fcb335bb7b18..6d1819269cbe 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -513,4 +513,5 @@ static int __init boot_wait_for_devices(void)
late_initcall(boot_wait_for_devices);
#endif
+MODULE_DESCRIPTION("Xen PV-device frontend support");
MODULE_LICENSE("GPL");
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index bf1c94e51dd0..07709b0d7688 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -4,7 +4,7 @@ config BCACHEFS_FS
depends on BLOCK
select EXPORTFS
select CLOSURES
- select LIBCRC32C
+ select CRC32
select CRC64
select FS_POSIX_ACL
select LZ4_COMPRESS
@@ -15,10 +15,9 @@ config BCACHEFS_FS
select ZLIB_INFLATE
select ZSTD_COMPRESS
select ZSTD_DECOMPRESS
- select CRYPTO
select CRYPTO_LIB_SHA256
- select CRYPTO_CHACHA20
- select CRYPTO_POLY1305
+ select CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_POLY1305
select KEYS
select RAID6_PQ
select XOR_BLOCKS
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 5d9f208a1bb7..5cb0fc384ac0 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -981,8 +981,8 @@ struct bch_fs {
mempool_t compress_workspace[BCH_COMPRESSION_OPT_NR];
size_t zstd_workspace_size;
- struct crypto_sync_skcipher *chacha20;
- struct crypto_shash *poly1305;
+ struct bch_key chacha20_key;
+ bool chacha20_key_set;
atomic64_t key_version;
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
index d1ad1a7613c9..7d6c971db23c 100644
--- a/fs/bcachefs/btree_journal_iter.c
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -644,8 +644,6 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
*/
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
- cond_resched();
-
const struct journal_key *l = _l;
const struct journal_key *r = _r;
@@ -689,7 +687,8 @@ void bch2_journal_keys_put(struct bch_fs *c)
static void __journal_keys_sort(struct journal_keys *keys)
{
- sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL);
+ sort_nonatomic(keys->data, keys->nr, sizeof(keys->data[0]),
+ journal_sort_key_cmp, NULL);
cond_resched();
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 8c9fdb7263fe..86acf037590c 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -183,7 +183,7 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
return;
if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(&bn->keys))) {
- if (!c->chacha20)
+ if (!c->chacha20_key_set)
return;
struct nonce nonce = btree_nonce(&bn->keys, 0);
@@ -398,7 +398,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
bch2_print_string_as_lines(KERN_INFO, buf.buf);
}
- sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_cookie, NULL);
+ sort_nonatomic(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_cookie, NULL);
dst = 0;
darray_for_each(f->nodes, i) {
@@ -418,7 +418,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
}
f->nodes.nr = dst;
- sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
+ sort_nonatomic(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
if (0 && c->opts.verbose) {
printbuf_reset(&buf);
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index adbe576ec77e..0941fb2c026d 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -428,10 +428,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
*/
trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);
- sort(wb->flushing.keys.data,
- wb->flushing.keys.nr,
- sizeof(wb->flushing.keys.data[0]),
- wb_key_seq_cmp, NULL);
+ sort_nonatomic(wb->flushing.keys.data,
+ wb->flushing.keys.nr,
+ sizeof(wb->flushing.keys.data[0]),
+ wb_key_seq_cmp, NULL);
darray_for_each(wb->flushing.keys, i) {
if (!i->journal_seq)
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 3726689093e3..d0a34a097b80 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -7,17 +7,12 @@
#include "super-io.h"
#include <linux/crc32c.h>
-#include <linux/crypto.h>
#include <linux/xxhash.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/ratelimit.h>
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
#include <crypto/chacha.h>
-#include <crypto/hash.h>
#include <crypto/poly1305.h>
-#include <crypto/skcipher.h>
#include <keys/user-type.h>
/*
@@ -96,116 +91,40 @@ static void bch2_checksum_update(struct bch2_checksum_state *state, const void *
}
}
-static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
- struct nonce nonce,
- struct scatterlist *sg, size_t len)
+static void bch2_chacha20_init(u32 state[CHACHA_STATE_WORDS],
+ const struct bch_key *key, struct nonce nonce)
{
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ u32 key_words[CHACHA_KEY_SIZE / sizeof(u32)];
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
+ BUILD_BUG_ON(sizeof(key_words) != sizeof(*key));
+ memcpy(key_words, key, sizeof(key_words));
+ le32_to_cpu_array(key_words, ARRAY_SIZE(key_words));
- int ret = crypto_skcipher_encrypt(req);
- if (ret)
- pr_err("got error %i from crypto_skcipher_encrypt()", ret);
-
- return ret;
-}
-
-static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
- struct nonce nonce,
- void *buf, size_t len)
-{
- if (!is_vmalloc_addr(buf)) {
- struct scatterlist sg = {};
-
- sg_mark_end(&sg);
- sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
- return do_encrypt_sg(tfm, nonce, &sg, len);
- } else {
- DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
- size_t sgl_len = 0;
- int ret;
-
- darray_init(&sgl);
-
- while (len) {
- unsigned offset = offset_in_page(buf);
- struct scatterlist sg = {
- .page_link = (unsigned long) vmalloc_to_page(buf),
- .offset = offset,
- .length = min(len, PAGE_SIZE - offset),
- };
+ BUILD_BUG_ON(sizeof(nonce) != CHACHA_IV_SIZE);
+ chacha_init(state, key_words, (const u8 *)nonce.d);
- if (darray_push(&sgl, sg)) {
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
- if (ret)
- goto err;
-
- nonce = nonce_add(nonce, sgl_len);
- sgl_len = 0;
- sgl.nr = 0;
- BUG_ON(darray_push(&sgl, sg));
- }
-
- buf += sg.length;
- len -= sg.length;
- sgl_len += sg.length;
- }
-
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
-err:
- darray_exit(&sgl);
- return ret;
- }
+ memzero_explicit(key_words, sizeof(key_words));
}
-int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
- void *buf, size_t len)
+static void bch2_chacha20(const struct bch_key *key, struct nonce nonce,
+ void *data, size_t len)
{
- struct crypto_sync_skcipher *chacha20 =
- crypto_alloc_sync_skcipher("chacha20", 0, 0);
- int ret;
-
- ret = PTR_ERR_OR_ZERO(chacha20);
- if (ret) {
- pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
- return ret;
- }
-
- ret = crypto_skcipher_setkey(&chacha20->base,
- (void *) key, sizeof(*key));
- if (ret) {
- pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
- goto err;
- }
+ u32 state[CHACHA_STATE_WORDS];
- ret = do_encrypt(chacha20, nonce, buf, len);
-err:
- crypto_free_sync_skcipher(chacha20);
- return ret;
+ bch2_chacha20_init(state, key, nonce);
+ chacha20_crypt(state, data, data, len);
+ memzero_explicit(state, sizeof(state));
}
-static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
- struct nonce nonce)
+static void bch2_poly1305_init(struct poly1305_desc_ctx *desc,
+ struct bch_fs *c, struct nonce nonce)
{
- u8 key[POLY1305_KEY_SIZE];
- int ret;
+ u8 key[POLY1305_KEY_SIZE] = { 0 };
nonce.d[3] ^= BCH_NONCE_POLY;
- memset(key, 0, sizeof(key));
- ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
- if (ret)
- return ret;
-
- desc->tfm = c->poly1305;
- crypto_shash_init(desc);
- crypto_shash_update(desc, key, sizeof(key));
- return 0;
+ bch2_chacha20(&c->chacha20_key, nonce, key, sizeof(key));
+ poly1305_init(desc, key);
}
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
@@ -230,14 +149,13 @@ struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
case BCH_CSUM_chacha20_poly1305_80:
case BCH_CSUM_chacha20_poly1305_128: {
- SHASH_DESC_ON_STACK(desc, c->poly1305);
+ struct poly1305_desc_ctx dctx;
u8 digest[POLY1305_DIGEST_SIZE];
struct bch_csum ret = { 0 };
- gen_poly_key(c, desc, nonce);
-
- crypto_shash_update(desc, data, len);
- crypto_shash_final(desc, digest);
+ bch2_poly1305_init(&dctx, c, nonce);
+ poly1305_update(&dctx, data, len);
+ poly1305_final(&dctx, digest);
memcpy(&ret, digest, bch_crc_bytes[type]);
return ret;
@@ -253,11 +171,12 @@ int bch2_encrypt(struct bch_fs *c, unsigned type,
if (!bch2_csum_type_is_encryption(type))
return 0;
- if (bch2_fs_inconsistent_on(!c->chacha20,
+ if (bch2_fs_inconsistent_on(!c->chacha20_key_set,
c, "attempting to encrypt without encryption key"))
return -BCH_ERR_no_encryption_key;
- return do_encrypt(c->chacha20, nonce, data, len);
+ bch2_chacha20(&c->chacha20_key, nonce, data, len);
+ return 0;
}
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
@@ -296,26 +215,26 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
case BCH_CSUM_chacha20_poly1305_80:
case BCH_CSUM_chacha20_poly1305_128: {
- SHASH_DESC_ON_STACK(desc, c->poly1305);
+ struct poly1305_desc_ctx dctx;
u8 digest[POLY1305_DIGEST_SIZE];
struct bch_csum ret = { 0 };
- gen_poly_key(c, desc, nonce);
+ bch2_poly1305_init(&dctx, c, nonce);
#ifdef CONFIG_HIGHMEM
__bio_for_each_segment(bv, bio, *iter, *iter) {
void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
- crypto_shash_update(desc, p, bv.bv_len);
+ poly1305_update(&dctx, p, bv.bv_len);
kunmap_local(p);
}
#else
__bio_for_each_bvec(bv, bio, *iter, *iter)
- crypto_shash_update(desc,
+ poly1305_update(&dctx,
page_address(bv.bv_page) + bv.bv_offset,
bv.bv_len);
#endif
- crypto_shash_final(desc, digest);
+ poly1305_final(&dctx, digest);
memcpy(&ret, digest, bch_crc_bytes[type]);
return ret;
@@ -338,43 +257,33 @@ int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
{
struct bio_vec bv;
struct bvec_iter iter;
- DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
- size_t sgl_len = 0;
+ u32 chacha_state[CHACHA_STATE_WORDS];
int ret = 0;
- if (bch2_fs_inconsistent_on(!c->chacha20,
+ if (bch2_fs_inconsistent_on(!c->chacha20_key_set,
c, "attempting to encrypt without encryption key"))
return -BCH_ERR_no_encryption_key;
- darray_init(&sgl);
+ bch2_chacha20_init(chacha_state, &c->chacha20_key, nonce);
bio_for_each_segment(bv, bio, iter) {
- struct scatterlist sg = {
- .page_link = (unsigned long) bv.bv_page,
- .offset = bv.bv_offset,
- .length = bv.bv_len,
- };
-
- if (darray_push(&sgl, sg)) {
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
- if (ret)
- goto err;
-
- nonce = nonce_add(nonce, sgl_len);
- sgl_len = 0;
- sgl.nr = 0;
-
- BUG_ON(darray_push(&sgl, sg));
+ void *p;
+
+ /*
+ * chacha_crypt() assumes that the length is a multiple of
+ * CHACHA_BLOCK_SIZE on any non-final call.
+ */
+ if (!IS_ALIGNED(bv.bv_len, CHACHA_BLOCK_SIZE)) {
+ bch_err_ratelimited(c, "bio not aligned for encryption");
+ ret = -EIO;
+ break;
}
- sgl_len += sg.length;
+ p = bvec_kmap_local(&bv);
+ chacha20_crypt(chacha_state, p, p, bv.bv_len);
+ kunmap_local(p);
}
-
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
-err:
- darray_exit(&sgl);
+ memzero_explicit(chacha_state, sizeof(chacha_state));
return ret;
}
@@ -650,10 +559,7 @@ int bch2_decrypt_sb_key(struct bch_fs *c,
}
/* decrypt real key: */
- ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
- &sb_key, sizeof(sb_key));
- if (ret)
- goto err;
+ bch2_chacha20(&user_key, bch2_sb_key_nonce(c), &sb_key, sizeof(sb_key));
if (bch2_key_is_encrypted(&sb_key)) {
bch_err(c, "incorrect encryption key");
@@ -668,31 +574,6 @@ err:
return ret;
}
-static int bch2_alloc_ciphers(struct bch_fs *c)
-{
- if (c->chacha20)
- return 0;
-
- struct crypto_sync_skcipher *chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
- int ret = PTR_ERR_OR_ZERO(chacha20);
- if (ret) {
- bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
- return ret;
- }
-
- struct crypto_shash *poly1305 = crypto_alloc_shash("poly1305", 0, 0);
- ret = PTR_ERR_OR_ZERO(poly1305);
- if (ret) {
- bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
- crypto_free_sync_skcipher(chacha20);
- return ret;
- }
-
- c->chacha20 = chacha20;
- c->poly1305 = poly1305;
- return 0;
-}
-
#if 0
/*
@@ -797,35 +678,21 @@ err:
void bch2_fs_encryption_exit(struct bch_fs *c)
{
- if (c->poly1305)
- crypto_free_shash(c->poly1305);
- if (c->chacha20)
- crypto_free_sync_skcipher(c->chacha20);
+ memzero_explicit(&c->chacha20_key, sizeof(c->chacha20_key));
}
int bch2_fs_encryption_init(struct bch_fs *c)
{
struct bch_sb_field_crypt *crypt;
- struct bch_key key;
- int ret = 0;
+ int ret;
crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
if (!crypt)
- goto out;
+ return 0;
- ret = bch2_alloc_ciphers(c);
+ ret = bch2_decrypt_sb_key(c, crypt, &c->chacha20_key);
if (ret)
- goto out;
-
- ret = bch2_decrypt_sb_key(c, crypt, &key);
- if (ret)
- goto out;
-
- ret = crypto_skcipher_setkey(&c->chacha20->base,
- (void *) &key.key, sizeof(key.key));
- if (ret)
- goto out;
-out:
- memzero_explicit(&key, sizeof(key));
- return ret;
+ return ret;
+ c->chacha20_key_set = true;
+ return 0;
}
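
With the crypto_sync_skcipher/crypto_shash plumbing gone, the whole encrypt-and-MAC path is direct library calls. A condensed sketch of the two helpers introduced above (same library APIs; nonce handling simplified, bcachefs XORs BCH_NONCE_POLY into the nonce before deriving the Poly1305 key):

    #include <crypto/chacha.h>
    #include <crypto/poly1305.h>
    #include <linux/string.h>

    /* Mirrors bch2_chacha20(): stateless in-place encryption. */
    static void chacha20_sketch(const u32 key_words[CHACHA_KEY_SIZE / 4],
                                const u8 iv[CHACHA_IV_SIZE],
                                void *data, size_t len)
    {
            u32 state[CHACHA_STATE_WORDS];

            chacha_init(state, key_words, iv);
            chacha20_crypt(state, data, data, len);
            memzero_explicit(state, sizeof(state));
    }

    /* Mirrors bch2_poly1305_init() plus update/final: the one-time
     * Poly1305 key is ChaCha20 keystream over an all-zero buffer. */
    static void poly1305_sketch(const u32 key_words[CHACHA_KEY_SIZE / 4],
                                const u8 iv[CHACHA_IV_SIZE],
                                const void *data, size_t len,
                                u8 digest[POLY1305_DIGEST_SIZE])
    {
            struct poly1305_desc_ctx dctx;
            u8 poly_key[POLY1305_KEY_SIZE] = { 0 };

            chacha20_sketch(key_words, iv, poly_key, sizeof(poly_key));
            poly1305_init(&dctx, poly_key);
            poly1305_update(&dctx, data, len);
            poly1305_final(&dctx, digest);
            memzero_explicit(poly_key, sizeof(poly_key));
    }
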
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 4ac251c8fcd8..1310782d3ae9 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -69,7 +69,6 @@ static inline void bch2_csum_err_msg(struct printbuf *out,
bch2_csum_to_text(out, type, expected);
}
-int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
int bch2_request_key(struct bch_sb *, struct bch_key *);
#ifndef __KERNEL__
int bch2_revoke_key(struct bch_sb *);
@@ -156,7 +155,7 @@ static inline bool bch2_checksum_type_valid(const struct bch_fs *c,
if (type >= BCH_CSUM_NR)
return false;
- if (bch2_csum_type_is_encryption(type) && !c->chacha20)
+ if (bch2_csum_type_is_encryption(type) && !c->chacha20_key_set)
return false;
return true;
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index de02ebf847ec..b211c97238ab 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -607,7 +607,7 @@ void bch2_data_update_inflight_to_text(struct printbuf *out, struct data_update
prt_newline(out);
printbuf_indent_add(out, 2);
bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
- prt_printf(out, "read_done:\t\%u\n", m->read_done);
+ prt_printf(out, "read_done:\t%u\n", m->read_done);
bch2_write_op_to_text(out, &m->op);
printbuf_indent_sub(out, 2);
}
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index bf53a029f356..8488a7578115 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -287,8 +287,8 @@ static void dirent_init_casefolded_name(struct bkey_i_dirent *dirent,
EBUG_ON(!dirent->v.d_casefold);
EBUG_ON(!cf_name->len);
- dirent->v.d_cf_name_block.d_name_len = name->len;
- dirent->v.d_cf_name_block.d_cf_name_len = cf_name->len;
+ dirent->v.d_cf_name_block.d_name_len = cpu_to_le16(name->len);
+ dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_name->len);
memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len);
memcpy(&dirent->v.d_cf_name_block.d_names[name->len], cf_name->name, cf_name->len);
memset(&dirent->v.d_cf_name_block.d_names[name->len + cf_name->len], 0,
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index 19d4599918dc..e3a75dcca60c 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -225,11 +225,26 @@ static void bchfs_read(struct btree_trans *trans,
bch2_read_extent(trans, rbio, iter.pos,
data_btree, k, offset_into_extent, flags);
- swap(rbio->bio.bi_iter.bi_size, bytes);
+ /*
+	 * Careful: there's a landmine here if bch2_read_extent() ever
+	 * starts returning transaction restarts.
+ *
+ * We've changed rbio->bi_iter.bi_size to be "bytes we can read
+ * from this extent" with the swap call, and we restore it
+ * below. That restore needs to come before checking for
+ * errors.
+ *
+ * But unlike __bch2_read(), we use the rbio bvec iter, not one
+ * on the stack, so we can't do the restore right after the
+ * bch2_read_extent() call: we don't own that iterator anymore
+ * if BCH_READ_last_fragment is set, since we may have submitted
+ * that rbio instead of cloning it.
+ */
if (flags & BCH_READ_last_fragment)
break;
+ swap(rbio->bio.bi_iter.bi_size, bytes);
bio_advance(&rbio->bio, bytes);
err:
if (ret &&
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 417bb0c7bbfa..fd627c8d1053 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -977,7 +977,8 @@ retry_pick:
goto err;
}
- if (unlikely(bch2_csum_type_is_encryption(pick.crc.csum_type)) && !c->chacha20) {
+ if (unlikely(bch2_csum_type_is_encryption(pick.crc.csum_type)) &&
+ !c->chacha20_key_set) {
struct printbuf buf = PRINTBUF;
bch2_read_err_msg_trans(trans, &buf, orig, read_pos);
prt_printf(&buf, "attempting to read encrypted data without encryption key\n ");
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 1b7961f4f609..2a54ac79189b 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1460,7 +1460,7 @@ fsck_err:
static void journal_advance_devs_to_next_bucket(struct journal *j,
struct dev_alloc_list *devs,
- unsigned sectors, u64 seq)
+ unsigned sectors, __le64 seq)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 79fd18a5a07c..d2b07f602da9 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -389,9 +389,9 @@ int bch2_journal_replay(struct bch_fs *c)
* Now, replay any remaining keys in the order in which they appear in
* the journal, unpinning those journal entries as we go:
*/
- sort(keys_sorted.data, keys_sorted.nr,
- sizeof(keys_sorted.data[0]),
- journal_sort_seq_cmp, NULL);
+ sort_nonatomic(keys_sorted.data, keys_sorted.nr,
+ sizeof(keys_sorted.data[0]),
+ journal_sort_seq_cmp, NULL);
darray_for_each(keys_sorted, kp) {
cond_resched();
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index a58edde43bee..b79e80a435e0 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -70,14 +70,10 @@
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
-#include <crypto/hash.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_DESCRIPTION("bcachefs filesystem");
-MODULE_SOFTDEP("pre: chacha20");
-MODULE_SOFTDEP("pre: poly1305");
-MODULE_SOFTDEP("pre: xxhash");
const char * const bch2_fs_flag_strs[] = {
#define x(n) #n,
@@ -1002,12 +998,6 @@ static void print_mount_opts(struct bch_fs *c)
prt_str(&p, "starting version ");
bch2_version_to_text(&p, c->sb.version);
- if (c->opts.read_only) {
- prt_str(&p, " opts=");
- first = false;
- prt_printf(&p, "ro");
- }
-
for (i = 0; i < bch2_opts_nr; i++) {
const struct bch_option *opt = &bch2_opt_table[i];
u64 v = bch2_opt_get_by_id(&c->opts, i);
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index fa8515598341..73a2dfb854c5 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -3,9 +3,9 @@
config BTRFS_FS
tristate "Btrfs filesystem support"
select BLK_CGROUP_PUNT_BIO
+ select CRC32
select CRYPTO
select CRYPTO_CRC32C
- select LIBCRC32C
select CRYPTO_XXHASH
select CRYPTO_SHA256
select CRYPTO_BLAKE2B
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 7249d70e1a43..3e7def3d31c1 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -3,7 +3,7 @@ config CEPH_FS
tristate "Ceph distributed file system"
depends on INET
select CEPH_LIB
- select LIBCRC32C
+ select CRC32
select CRYPTO_AES
select CRYPTO
select NETFS_SUPPORT
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index 331e49cd1b8d..8f68ec49ad89 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -3,8 +3,8 @@
config EROFS_FS
tristate "EROFS filesystem support"
depends on BLOCK
+ select CRC32
select FS_IOMAP
- select LIBCRC32C
help
EROFS (Enhanced Read-Only File System) is a lightweight read-only
file system with modern designs (e.g. no buffer heads, inline
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index 9581e9bf8192..767fb4acdc93 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -56,7 +56,7 @@ struct erofs_super_block {
union {
__le16 rootnid_2b; /* nid of root directory */
__le16 blocks_hi; /* (48BIT on) blocks count MSB */
- } rb;
+ } __packed rb;
__le64 inos; /* total valid ino # (== f_files - f_favail) */
__le64 epoch; /* base seconds used for compact inodes */
__le32 fixed_nsec; /* fixed nanoseconds for compact inodes */
@@ -148,7 +148,7 @@ union erofs_inode_i_nb {
__le16 nlink; /* if EROFS_I_NLINK_1_BIT is unset */
__le16 blocks_hi; /* total blocks count MSB */
__le16 startblk_hi; /* starting block number MSB */
-};
+} __packed;
/* 32-byte reduced form of an ondisk inode */
struct erofs_inode_compact {
@@ -369,9 +369,9 @@ struct z_erofs_map_header {
* bit 7 : pack the whole file into packed inode
*/
__u8 h_clusterbits;
- };
+ } __packed;
__le16 h_extents_hi; /* extent count MSB */
- };
+ } __packed;
};
enum {
diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
index bec4b56b3826..4fa0a0121288 100644
--- a/fs/erofs/fileio.c
+++ b/fs/erofs/fileio.c
@@ -32,6 +32,8 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
ret = 0;
}
if (rq->bio.bi_end_io) {
+ if (ret < 0 && !rq->bio.bi_status)
+ rq->bio.bi_status = errno_to_blk_status(ret);
rq->bio.bi_end_io(&rq->bio);
} else {
bio_for_each_folio_all(fi, &rq->bio) {
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 0671184d9cf1..5c061aaeeb45 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -725,7 +725,6 @@ static int z_erofs_register_pcluster(struct z_erofs_frontend *fe)
lockref_init(&pcl->lockref); /* one ref for this request */
pcl->algorithmformat = map->m_algorithmformat;
pcl->pclustersize = map->m_plen;
- pcl->pageofs_in = pageofs_in;
pcl->length = 0;
pcl->partial = true;
pcl->next = fe->head;
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 8de50df05dfe..14ea47f954f5 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -559,7 +559,8 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
pos += sizeof(__le64);
lstart = 0;
} else {
- lstart = map->m_la >> vi->z_lclusterbits;
+ lstart = round_down(map->m_la, 1 << vi->z_lclusterbits);
+ pos += (lstart >> vi->z_lclusterbits) * recsz;
pa = EROFS_NULL_ADDR;
}
@@ -614,7 +615,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
if (last && (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
map->m_flags |= EROFS_MAP_MAPPED | EROFS_MAP_FRAGMENT;
vi->z_fragmentoff = map->m_plen;
- if (recsz >= offsetof(struct z_erofs_extent, pstart_lo))
+ if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
vi->z_fragmentoff |= map->m_pa << 32;
} else if (map->m_plen) {
map->m_flags |= EROFS_MAP_MAPPED |
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 87ee3a17bd29..e8c5525afc67 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -351,10 +351,9 @@ int ext4_check_blockref(const char *function, unsigned int line,
{
__le32 *bref = p;
unsigned int blk;
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- if (ext4_has_feature_journal(inode->i_sb) &&
- (inode->i_ino ==
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+ if (journal && inode == journal->j_inode)
return 0;
while (bref < p+max) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1dc09ed5d403..94c7d2d828a6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -386,10 +386,11 @@ static int __check_block_validity(struct inode *inode, const char *func,
unsigned int line,
struct ext4_map_blocks *map)
{
- if (ext4_has_feature_journal(inode->i_sb) &&
- (inode->i_ino ==
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+
+ if (journal && inode == journal->j_inode)
return 0;
+
if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
"lblock %lu mapped to illegal pblock %llu "
@@ -4724,22 +4725,43 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
inode_set_iversion_queried(inode, val);
}
-static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
-
+static int check_igot_inode(struct inode *inode, ext4_iget_flags flags,
+ const char *function, unsigned int line)
{
+ const char *err_str;
+
if (flags & EXT4_IGET_EA_INODE) {
- if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
- return "missing EA_INODE flag";
+ if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+ err_str = "missing EA_INODE flag";
+ goto error;
+ }
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
- EXT4_I(inode)->i_file_acl)
- return "ea_inode with extended attributes";
+ EXT4_I(inode)->i_file_acl) {
+ err_str = "ea_inode with extended attributes";
+ goto error;
+ }
} else {
- if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
- return "unexpected EA_INODE flag";
+ if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+ /*
+ * open_by_handle_at() could provide an old inode number
+ * that has since been reused for an ea_inode; this does
+ * not indicate filesystem corruption
+ */
+ if (flags & EXT4_IGET_HANDLE)
+ return -ESTALE;
+ err_str = "unexpected EA_INODE flag";
+ goto error;
+ }
+ }
+ if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
+ err_str = "unexpected bad inode w/o EXT4_IGET_BAD";
+ goto error;
}
- if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
- return "unexpected bad inode w/o EXT4_IGET_BAD";
- return NULL;
+ return 0;
+
+error:
+ ext4_error_inode(inode, function, line, 0, err_str);
+ return -EFSCORRUPTED;
}
struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
@@ -4751,7 +4773,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
struct ext4_inode_info *ei;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
struct inode *inode;
- const char *err_str;
journal_t *journal = EXT4_SB(sb)->s_journal;
long ret;
loff_t size;
@@ -4780,10 +4801,10 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW)) {
- if ((err_str = check_igot_inode(inode, flags)) != NULL) {
- ext4_error_inode(inode, function, line, 0, err_str);
+ ret = check_igot_inode(inode, flags, function, line);
+ if (ret) {
iput(inode);
- return ERR_PTR(-EFSCORRUPTED);
+ return ERR_PTR(ret);
}
return inode;
}
@@ -5065,13 +5086,21 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ret = -EFSCORRUPTED;
goto bad_inode;
}
- if ((err_str = check_igot_inode(inode, flags)) != NULL) {
- ext4_error_inode(inode, function, line, 0, err_str);
- ret = -EFSCORRUPTED;
- goto bad_inode;
+ ret = check_igot_inode(inode, flags, function, line);
+ /*
+ * -ESTALE here means there is nothing inherently wrong with the inode,
+ * it's just not an inode we can return for an fhandle lookup.
+ */
+ if (ret == -ESTALE) {
+ brelse(iloc.bh);
+ unlock_new_inode(inode);
+ iput(inode);
+ return ERR_PTR(-ESTALE);
}
-
+ if (ret)
+ goto bad_inode;
brelse(iloc.bh);
+
unlock_new_inode(inode);
return inode;
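The conversion above turns a string-returning validator into one that reports the error itself and returns an errno, which is what lets -ESTALE pass through untouched for fhandle lookups. A simplified standalone sketch of the same shape (invented names; EFSCORRUPTED mapped to EUCLEAN as the kernel does, Linux errno values assumed):

#include <stdio.h>
#include <errno.h>

#define EFSCORRUPTED	EUCLEAN		/* matches the kernel's mapping */

struct inode_like { int is_ea_inode; int from_handle; };

static int check_inode(const struct inode_like *i)
{
	const char *err_str;

	if (i->is_ea_inode && i->from_handle)
		return -ESTALE;		/* stale handle, not corruption */
	if (i->is_ea_inode) {
		err_str = "unexpected EA_INODE flag";
		goto error;
	}
	return 0;
error:
	fprintf(stderr, "inode error: %s\n", err_str);
	return -EFSCORRUPTED;
}

int main(void)
{
	struct inode_like stale = { .is_ea_inode = 1, .from_handle = 1 };
	struct inode_like bad = { .is_ea_inode = 1, .from_handle = 0 };

	printf("%d %d\n", check_inode(&stale), check_inode(&bad));
	return 0;
}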
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 0d523e9fb3d5..f88424c28194 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3037,10 +3037,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
unsigned char blocksize_bits = min_t(unsigned char,
sb->s_blocksize_bits,
EXT4_MAX_BLOCK_LOG_SIZE);
- struct sg {
- struct ext4_group_info info;
- ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
- } sg;
+ DEFINE_RAW_FLEX(struct ext4_group_info, sg, bb_counters,
+ EXT4_MAX_BLOCK_LOG_SIZE + 2);
group--;
if (group == 0)
@@ -3048,7 +3046,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
" 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
" 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
- i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
+ i = (blocksize_bits + 2) * sizeof(sg->bb_counters[0]) +
sizeof(struct ext4_group_info);
grinfo = ext4_get_group_info(sb, group);
@@ -3068,14 +3066,14 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
* We care only about free space counters in the group info and
* these are safe to access even after the buddy has been unloaded
*/
- memcpy(&sg, grinfo, i);
- seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
- sg.info.bb_fragments, sg.info.bb_first_free);
+ memcpy(sg, grinfo, i);
+ seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg->bb_free,
+ sg->bb_fragments, sg->bb_first_free);
for (i = 0; i <= 13; i++)
seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
- sg.info.bb_counters[i] : 0);
+ sg->bb_counters[i] : 0);
seq_puts(seq, " ]");
- if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
+ if (EXT4_MB_GRP_BBITMAP_CORRUPT(sg))
seq_puts(seq, " Block bitmap corrupted!");
seq_putc(seq, '\n');
return 0;
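DEFINE_RAW_FLEX replaces the ad-hoc `struct sg` wrapper with an on-stack buffer typed as a flexible-array struct. A simplified userspace equivalent of the idea (STACK_FLEX is invented here; the kernel macro handles more details):

#include <stdio.h>
#include <stdalign.h>

struct group_info {
	unsigned bb_free;
	unsigned bb_counters[];		/* flexible array member */
};

/* One buffer sized for the struct plus COUNT trailing elements. */
#define STACK_FLEX(type, name, member, count)				\
	alignas(type) unsigned char name##_buf[sizeof(type) +		\
		sizeof(((type *)0)->member[0]) * (count)] = { 0 };	\
	type *name = (type *)(void *)name##_buf

int main(void)
{
	STACK_FLEX(struct group_info, sg, bb_counters, 16);

	sg->bb_free = 42;
	sg->bb_counters[15] = 7;
	printf("%u %u\n", sg->bb_free, sg->bb_counters[15]);
	return 0;
}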
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index cb5cb33b1d91..e9712e64ec8f 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1971,7 +1971,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
* split it in half by count; each resulting block will have at least
* half the space free.
*/
- if (i > 0)
+ if (i >= 0)
split = count - move;
else
split = count/2;
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index be7f87a8e11a..7bd231d16d4a 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -4,7 +4,6 @@ config GFS2_FS
select BUFFER_HEAD
select FS_POSIX_ACL
select CRC32
- select LIBCRC32C
select QUOTACTL
select FS_IOMAP
help
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index e69968e88fe7..35892df7335c 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -704,18 +704,12 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
cifs_free_hash(&server->secmech.md5);
cifs_free_hash(&server->secmech.sha512);
- if (!SERVER_IS_CHAN(server)) {
- if (server->secmech.enc) {
- crypto_free_aead(server->secmech.enc);
- server->secmech.enc = NULL;
- }
-
- if (server->secmech.dec) {
- crypto_free_aead(server->secmech.dec);
- server->secmech.dec = NULL;
- }
- } else {
+ if (server->secmech.enc) {
+ crypto_free_aead(server->secmech.enc);
server->secmech.enc = NULL;
+ }
+ if (server->secmech.dec) {
+ crypto_free_aead(server->secmech.dec);
server->secmech.dec = NULL;
}
}
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 07c4688ec4c9..3b32116b0b49 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -625,10 +625,8 @@ struct smb_version_operations {
bool (*is_status_io_timeout)(char *buf);
/* Check for STATUS_NETWORK_NAME_DELETED */
bool (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
- int (*parse_reparse_point)(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data);
+ struct reparse_data_buffer * (*get_reparse_point_buffer)(const struct kvec *rsp_iov,
+ u32 *plen);
int (*create_reparse_symlink)(const unsigned int xid,
struct inode *inode,
struct dentry *dentry,
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index 48d0d6f439cf..18d67ab113f0 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -2256,6 +2256,8 @@ typedef struct {
#define FILE_SUPPORTS_ENCRYPTION 0x00020000
#define FILE_SUPPORTS_OBJECT_IDS 0x00010000
#define FILE_VOLUME_IS_COMPRESSED 0x00008000
+#define FILE_SUPPORTS_POSIX_UNLINK_RENAME 0x00000400
+#define FILE_RETURNS_CLEANUP_RESULT_INFO 0x00000200
#define FILE_SUPPORTS_REMOTE_STORAGE 0x00000100
#define FILE_SUPPORTS_REPARSE_POINTS 0x00000080
#define FILE_SUPPORTS_SPARSE_FILES 0x00000040
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index f298e86a3c1f..4a0b2d220fe8 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -2556,6 +2556,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
return 0;
if (tcon->nodelete != ctx->nodelete)
return 0;
+ if (tcon->posix_extensions != ctx->linux_ext)
+ return 0;
return 1;
}
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index a00a9d91d0da..75be4b46bc6f 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -1203,18 +1203,17 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
goto out;
}
break;
- case IO_REPARSE_TAG_MOUNT_POINT:
- cifs_create_junction_fattr(fattr, sb);
- rc = 0;
- goto out;
default:
/* Check for cached reparse point data */
if (data->symlink_target || data->reparse.buf) {
rc = 0;
- } else if (iov && server->ops->parse_reparse_point) {
- rc = server->ops->parse_reparse_point(cifs_sb,
- full_path,
- iov, data);
+ } else if (iov && server->ops->get_reparse_point_buffer) {
+ struct reparse_data_buffer *reparse_buf;
+ u32 reparse_len;
+
+ reparse_buf = server->ops->get_reparse_point_buffer(iov, &reparse_len);
+ rc = parse_reparse_point(reparse_buf, reparse_len,
+ cifs_sb, full_path, data);
/*
* If the reparse point was not handled but it is the
* name surrogate which points to directory, then treat
@@ -1228,6 +1227,16 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
cifs_create_junction_fattr(fattr, sb);
goto out;
}
+ /*
+ * If the reparse point is unsupported by the Linux SMB
+ * client, let the SMB server process it instead: mask the
+ * -EOPNOTSUPP error code so that the Linux SMB client
+ * still sends an SMB OPEN request to the server. If the
+ * server does not support this reparse point either, it
+ * will return an error when the path is opened.
+ */
+ if (rc == -EOPNOTSUPP)
+ rc = 0;
}
if (data->reparse.tag == IO_REPARSE_TAG_SYMLINK && !rc) {
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index 2b9e9885dc42..bb25e77c5540 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -542,12 +542,12 @@ static int wsl_set_reparse_buf(struct reparse_data_buffer **buf,
kfree(symname_utf16);
return -ENOMEM;
}
- /* Flag 0x02000000 is unknown, but all wsl symlinks have this value */
- symlink_buf->Flags = cpu_to_le32(0x02000000);
- /* PathBuffer is in UTF-8 but without trailing null-term byte */
+ /* Version field must be set to 2 (MS-FSCC 2.1.2.7) */
+ symlink_buf->Version = cpu_to_le32(2);
+ /* Target for Version 2 is in UTF-8 but without trailing null-term byte */
symname_utf8_len = utf16s_to_utf8s((wchar_t *)symname_utf16, symname_utf16_len/2,
UTF16_LITTLE_ENDIAN,
- symlink_buf->PathBuffer,
+ symlink_buf->Target,
symname_utf8_maxlen);
*buf = (struct reparse_data_buffer *)symlink_buf;
buf_len = sizeof(struct reparse_wsl_symlink_data_buffer) + symname_utf8_len;
@@ -1016,29 +1016,36 @@ static int parse_reparse_wsl_symlink(struct reparse_wsl_symlink_data_buffer *buf
struct cifs_open_info_data *data)
{
int len = le16_to_cpu(buf->ReparseDataLength);
+ int data_offset = offsetof(typeof(*buf), Target) - offsetof(typeof(*buf), Version);
int symname_utf8_len;
__le16 *symname_utf16;
int symname_utf16_len;
- if (len <= sizeof(buf->Flags)) {
+ if (len <= data_offset) {
cifs_dbg(VFS, "srv returned malformed wsl symlink buffer\n");
return -EIO;
}
- /* PathBuffer is in UTF-8 but without trailing null-term byte */
- symname_utf8_len = len - sizeof(buf->Flags);
+ /* MS-FSCC 2.1.2.7 defines layout of the Target field only for Version 2. */
+ if (le32_to_cpu(buf->Version) != 2) {
+ cifs_dbg(VFS, "srv returned unsupported wsl symlink version %u\n", le32_to_cpu(buf->Version));
+ return -EIO;
+ }
+
+ /* Target for Version 2 is in UTF-8 but without trailing null-term byte */
+ symname_utf8_len = len - data_offset;
/*
* Check that buffer does not contain null byte
* because Linux cannot process symlink with null byte.
*/
- if (strnlen(buf->PathBuffer, symname_utf8_len) != symname_utf8_len) {
+ if (strnlen(buf->Target, symname_utf8_len) != symname_utf8_len) {
cifs_dbg(VFS, "srv returned null byte in wsl symlink target location\n");
return -EIO;
}
symname_utf16 = kzalloc(symname_utf8_len * 2, GFP_KERNEL);
if (!symname_utf16)
return -ENOMEM;
- symname_utf16_len = utf8s_to_utf16s(buf->PathBuffer, symname_utf8_len,
+ symname_utf16_len = utf8s_to_utf16s(buf->Target, symname_utf8_len,
UTF16_LITTLE_ENDIAN,
(wchar_t *) symname_utf16, symname_utf8_len * 2);
if (symname_utf16_len < 0) {
@@ -1062,8 +1069,6 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
const char *full_path,
struct cifs_open_info_data *data)
{
- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
-
data->reparse.buf = buf;
/* See MS-FSCC 2.1.2 */
@@ -1090,24 +1095,17 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
}
return 0;
default:
- cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n",
- le32_to_cpu(buf->ReparseTag));
return -EOPNOTSUPP;
}
}
-int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data)
+struct reparse_data_buffer *smb2_get_reparse_point_buffer(const struct kvec *rsp_iov,
+ u32 *plen)
{
- struct reparse_data_buffer *buf;
struct smb2_ioctl_rsp *io = rsp_iov->iov_base;
- u32 plen = le32_to_cpu(io->OutputCount);
-
- buf = (struct reparse_data_buffer *)((u8 *)io +
- le32_to_cpu(io->OutputOffset));
- return parse_reparse_point(buf, plen, cifs_sb, full_path, data);
+ *plen = le32_to_cpu(io->OutputCount);
+ return (struct reparse_data_buffer *)((u8 *)io +
+ le32_to_cpu(io->OutputOffset));
}
static bool wsl_to_fattr(struct cifs_open_info_data *data,
@@ -1233,16 +1231,6 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
bool ok;
switch (tag) {
- case IO_REPARSE_TAG_INTERNAL:
- if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY))
- return false;
- fallthrough;
- case IO_REPARSE_TAG_DFS:
- case IO_REPARSE_TAG_DFSR:
- case IO_REPARSE_TAG_MOUNT_POINT:
- /* See cifs_create_junction_fattr() */
- fattr->cf_mode = S_IFDIR | 0711;
- break;
case IO_REPARSE_TAG_LX_SYMLINK:
case IO_REPARSE_TAG_LX_FIFO:
case IO_REPARSE_TAG_AF_UNIX:
@@ -1262,7 +1250,14 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
fattr->cf_mode |= S_IFLNK;
break;
default:
- return false;
+ if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY))
+ return false;
+ if (!IS_REPARSE_TAG_NAME_SURROGATE(tag) &&
+ tag != IO_REPARSE_TAG_INTERNAL)
+ return false;
+ /* See cifs_create_junction_fattr() */
+ fattr->cf_mode = S_IFDIR | 0711;
+ break;
}
fattr->cf_dtype = S_DT(fattr->cf_mode);
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index c0be5ab45a78..08de853b36a8 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -135,9 +135,6 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
int smb2_mknod_reparse(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev);
-int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data);
+struct reparse_data_buffer *smb2_get_reparse_point_buffer(const struct kvec *rsp_iov, u32 *len);
#endif /* _CIFS_REPARSE_H */
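The op change above (mirrored for SMB1 in smb1ops.c below) moves parsing out of the per-dialect callbacks: each dialect now only locates the raw reparse buffer inside its own response format, and one shared parser handles every dialect. The shape of that refactor, in an invented miniature:

#include <stdint.h>
#include <stdio.h>

struct ops {
	/* per-dialect: find the reparse buffer inside a raw response */
	const void *(*get_buf)(const void *rsp, uint32_t *plen);
};

static const void *dialect_a_get_buf(const void *rsp, uint32_t *plen)
{
	*plen = 16;			/* offsets differ per dialect */
	return (const uint8_t *)rsp + 8;
}

static int parse_common(const void *buf, uint32_t len)
{
	printf("parsing %u bytes at %p\n", len, buf);	/* shared logic */
	return 0;
}

int main(void)
{
	static const uint8_t rsp[64];
	const struct ops ops = { .get_buf = dialect_a_get_buf };
	uint32_t len;
	const void *buf = ops.get_buf(rsp, &len);

	return parse_common(buf, len);
}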
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index f2ca5963cd9d..b3fa9ee26912 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -680,6 +680,22 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
*pbcc_area = bcc_ptr;
}
+static void
+ascii_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
+{
+ char *bcc_ptr = *pbcc_area;
+
+ strcpy(bcc_ptr, "Linux version ");
+ bcc_ptr += strlen("Linux version ");
+ strcpy(bcc_ptr, init_utsname()->release);
+ bcc_ptr += strlen(init_utsname()->release) + 1;
+
+ strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+ bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+
+ *pbcc_area = bcc_ptr;
+}
+
static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
@@ -704,6 +720,25 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
*pbcc_area = bcc_ptr;
}
+static void ascii_domain_string(char **pbcc_area, struct cifs_ses *ses,
+ const struct nls_table *nls_cp)
+{
+ char *bcc_ptr = *pbcc_area;
+ int len;
+
+ /* copy domain */
+ if (ses->domainName != NULL) {
+ len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ if (WARN_ON_ONCE(len < 0))
+ len = CIFS_MAX_DOMAINNAME_LEN - 1;
+ bcc_ptr += len;
+ } /* else we send a null domain name so server will default to its own domain */
+ *bcc_ptr = 0;
+ bcc_ptr++;
+
+ *pbcc_area = bcc_ptr;
+}
+
static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
@@ -749,25 +784,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
*bcc_ptr = 0;
bcc_ptr++; /* account for null termination */
- /* copy domain */
- if (ses->domainName != NULL) {
- len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
- if (WARN_ON_ONCE(len < 0))
- len = CIFS_MAX_DOMAINNAME_LEN - 1;
- bcc_ptr += len;
- } /* else we send a null domain name so server will default to its own domain */
- *bcc_ptr = 0;
- bcc_ptr++;
-
/* BB check for overflow here */
- strcpy(bcc_ptr, "Linux version ");
- bcc_ptr += strlen("Linux version ");
- strcpy(bcc_ptr, init_utsname()->release);
- bcc_ptr += strlen(init_utsname()->release) + 1;
-
- strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
- bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+ ascii_domain_string(&bcc_ptr, ses, nls_cp);
+ ascii_oslm_strings(&bcc_ptr, nls_cp);
*pbcc_area = bcc_ptr;
}
@@ -1570,7 +1590,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
sess_data->iov[1].iov_len = msg->secblob_len;
pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len);
- if (ses->capabilities & CAP_UNICODE) {
+ if (pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) {
/* unicode strings must be word aligned */
if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
*bcc_ptr = 0;
@@ -1579,8 +1599,8 @@ sess_auth_kerberos(struct sess_data *sess_data)
unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
} else {
- /* BB: is this right? */
- ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
+ ascii_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+ ascii_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
}
sess_data->iov[2].iov_len = (long) bcc_ptr -
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index 26df807fbe7a..0adeec652dc1 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -568,6 +568,42 @@ static int cifs_query_path_info(const unsigned int xid,
data->reparse_point = le32_to_cpu(fi.Attributes) & ATTR_REPARSE;
}
+#ifdef CONFIG_CIFS_XATTR
+ /*
+ * For WSL CHR and BLK reparse points it is required to fetch
+ * EA $LXDEV which contains major and minor device numbers.
+ */
+ if (!rc && data->reparse_point) {
+ struct smb2_file_full_ea_info *ea;
+
+ ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
+ rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_DEV,
+ &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1],
+ SMB2_WSL_XATTR_DEV_SIZE, cifs_sb);
+ if (rc == SMB2_WSL_XATTR_DEV_SIZE) {
+ ea->next_entry_offset = cpu_to_le32(0);
+ ea->flags = 0;
+ ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN;
+ ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_DEV_SIZE);
+ memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_DEV, SMB2_WSL_XATTR_NAME_LEN + 1);
+ data->wsl.eas_len = sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
+ SMB2_WSL_XATTR_DEV_SIZE;
+ rc = 0;
+ } else if (rc >= 0) {
+ /* It is an error if EA $LXDEV has wrong size. */
+ rc = -EINVAL;
+ } else {
+ /*
+ * In all other cases, ignore the error if fetching
+ * EA $LXDEV failed. It is needed only for WSL CHR
+ * and BLK reparse points, and wsl_to_fattr()
+ * handles the case where the EA is missing.
+ */
+ rc = 0;
+ }
+ }
+#endif
+
return rc;
}
@@ -970,18 +1006,13 @@ static int cifs_query_symlink(const unsigned int xid,
return rc;
}
-static int cifs_parse_reparse_point(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data)
+static struct reparse_data_buffer *cifs_get_reparse_point_buffer(const struct kvec *rsp_iov,
+ u32 *plen)
{
- struct reparse_data_buffer *buf;
TRANSACT_IOCTL_RSP *io = rsp_iov->iov_base;
- u32 plen = le16_to_cpu(io->ByteCount);
-
- buf = (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol +
- le32_to_cpu(io->DataOffset));
- return parse_reparse_point(buf, plen, cifs_sb, full_path, data);
+ *plen = le16_to_cpu(io->ByteCount);
+ return (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol +
+ le32_to_cpu(io->DataOffset));
}
static bool
@@ -1157,7 +1188,7 @@ struct smb_version_operations smb1_operations = {
.rename = CIFSSMBRename,
.create_hardlink = CIFSCreateHardLink,
.query_symlink = cifs_query_symlink,
- .parse_reparse_point = cifs_parse_reparse_point,
+ .get_reparse_point_buffer = cifs_get_reparse_point_buffer,
.open = cifs_open_file,
.set_fid = cifs_set_fid,
.close = cifs_close_file,
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 41d8cd20b25f..2fe8eeb98535 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4555,9 +4555,9 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
return rc;
}
} else {
- if (unlikely(!server->secmech.dec))
- return -EIO;
-
+ rc = smb3_crypto_aead_allocate(server);
+ if (unlikely(rc))
+ return rc;
tfm = server->secmech.dec;
}
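Together with the SMB2_negotiate() hunk further down, this moves AEAD transform setup from negotiate time to first use in the decrypt path, which also removes the channel/primary-server sharing special case. A sketch of the allocate-on-first-use shape, assuming (as the call site above implies) that the allocation helper is idempotent; all names below are invented:

#include <stdlib.h>

struct tfm;
struct server { struct tfm *dec; };

static struct tfm *alloc_tfm(void) { return malloc(1); }

/* Idempotent: repeated calls after a successful one are no-ops. */
static int crypto_allocate(struct server *srv)
{
	if (srv->dec)
		return 0;
	srv->dec = alloc_tfm();
	return srv->dec ? 0 : -1;
}

static struct tfm *get_dec_tfm(struct server *srv)
{
	if (crypto_allocate(srv))
		return NULL;		/* caller maps this to an errno */
	return srv->dec;
}

int main(void)
{
	struct server srv = { 0 };

	return get_dec_tfm(&srv) ? 0 : 1;
}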
@@ -5303,7 +5303,7 @@ struct smb_version_operations smb20_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
@@ -5406,7 +5406,7 @@ struct smb_version_operations smb21_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
@@ -5513,7 +5513,7 @@ struct smb_version_operations smb30_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
@@ -5629,7 +5629,7 @@ struct smb_version_operations smb311_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 81e05db8e4d5..c4d52bebd37d 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -1252,15 +1252,8 @@ SMB2_negotiate(const unsigned int xid,
cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
}
- if (server->cipher_type && !rc) {
- if (!SERVER_IS_CHAN(server)) {
- rc = smb3_crypto_aead_allocate(server);
- } else {
- /* For channels, just reuse the primary server crypto secmech. */
- server->secmech.enc = server->primary_server->secmech.enc;
- server->secmech.dec = server->primary_server->secmech.dec;
- }
- }
+ if (server->cipher_type && !rc)
+ rc = smb3_crypto_aead_allocate(server);
neg_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 764dca80c15c..f79a5165a7cc 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -1567,13 +1567,13 @@ struct reparse_nfs_data_buffer {
__u8 DataBuffer[];
} __packed;
-/* For IO_REPARSE_TAG_LX_SYMLINK */
+/* For IO_REPARSE_TAG_LX_SYMLINK - see MS-FSCC 2.1.2.7 */
struct reparse_wsl_symlink_data_buffer {
__le32 ReparseTag;
__le16 ReparseDataLength;
__u16 Reserved;
- __le32 Flags;
- __u8 PathBuffer[]; /* Variable Length UTF-8 string without nul-term */
+ __le32 Version; /* Always 2 */
+ __u8 Target[]; /* Variable Length UTF-8 string without nul-term */
} __packed;
struct validate_negotiate_info_req {
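The struct change above, paired with the reparse.c hunks, pins the WSL symlink buffer to the Version 2 layout from MS-FSCC 2.1.2.7: a Version field that must be 2, followed by a UTF-8 Target with no trailing NUL. A minimal host-endian validation sketch of that layout (userspace, illustrative only):

#include <stdint.h>
#include <string.h>
#include <errno.h>

struct wsl_symlink {
	uint32_t tag;
	uint16_t len;		/* ReparseDataLength: Version + Target */
	uint16_t rsvd;
	uint32_t version;	/* must be 2 per MS-FSCC 2.1.2.7 */
	uint8_t target[];	/* UTF-8, no trailing NUL */
};

static int check(const struct wsl_symlink *b)
{
	int tlen = b->len - (int)sizeof(b->version);

	if (tlen <= 0 || b->version != 2)
		return -EIO;
	if (memchr(b->target, '\0', tlen))
		return -EIO;	/* embedded NUL is unusable on Linux */
	return 0;
}

int main(void)
{
	_Alignas(struct wsl_symlink)
	unsigned char buf[sizeof(struct wsl_symlink) + 4];
	struct wsl_symlink *b = (struct wsl_symlink *)buf;

	b->tag = 0xa000001d;	/* IO_REPARSE_TAG_LX_SYMLINK */
	b->rsvd = 0;
	b->len = sizeof(b->version) + 4;
	b->version = 2;
	memcpy(b->target, "/tmp", 4);
	return check(b);
}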
diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
index a3d8a905b07e..d742ba754348 100644
--- a/fs/smb/server/smb_common.h
+++ b/fs/smb/server/smb_common.h
@@ -72,6 +72,8 @@
#define FILE_SUPPORTS_ENCRYPTION 0x00020000
#define FILE_SUPPORTS_OBJECT_IDS 0x00010000
#define FILE_VOLUME_IS_COMPRESSED 0x00008000
+#define FILE_SUPPORTS_POSIX_UNLINK_RENAME 0x00000400
+#define FILE_RETURNS_CLEANUP_RESULT_INFO 0x00000200
#define FILE_SUPPORTS_REMOTE_STORAGE 0x00000100
#define FILE_SUPPORTS_REPARSE_POINTS 0x00000080
#define FILE_SUPPORTS_SPARSE_FILES 0x00000040
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index fffd6fffdce0..ae0ca6858496 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -3,7 +3,7 @@ config XFS_FS
tristate "XFS filesystem support"
depends on BLOCK
select EXPORTFS
- select LIBCRC32C
+ select CRC32
select FS_IOMAP
help
XFS is a high performance journaling filesystem which originated
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index 1cda7281f300..4948379237e9 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -126,6 +126,9 @@ int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx);
+int drm_kunit_add_mode_destroy_action(struct kunit *test,
+ struct drm_display_mode *mode);
+
struct drm_display_mode *
drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
u8 video_code);
diff --git a/include/drm/intel/intel-gtt.h b/include/drm/intel/intel-gtt.h
index cb0d5b7200c7..f53bcff01f22 100644
--- a/include/drm/intel/intel-gtt.h
+++ b/include/drm/intel/intel-gtt.h
@@ -28,6 +28,8 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags);
void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local);
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index 4736ea525048..d212848d07f3 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -850,6 +850,7 @@
MACRO__(0xE20C, ## __VA_ARGS__), \
MACRO__(0xE20D, ## __VA_ARGS__), \
MACRO__(0xE210, ## __VA_ARGS__), \
+ MACRO__(0xE211, ## __VA_ARGS__), \
MACRO__(0xE212, ## __VA_ARGS__), \
MACRO__(0xE215, ## __VA_ARGS__), \
MACRO__(0xE216, ## __VA_ARGS__)
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 0ffb97c78566..39c768f87dc9 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -67,7 +67,7 @@ enum kunit_status {
/*
* Speed Attribute is stored as an enum and separated into categories of
- * speed: very_slowm, slow, and normal. These speeds are relative to
+ * speed: very_slow, slow, and normal. These speeds are relative to
* other KUnit tests.
*
* Note: unset speed attribute acts as default of KUNIT_SPEED_NORMAL.
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 485b651869d9..5bc8f55c8cca 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -710,6 +710,7 @@ struct cgroup_subsys {
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_killed)(struct cgroup_subsys_state *css);
void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
int (*css_extra_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 28e999f2c642..e7da3c3b098b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -344,7 +344,7 @@ static inline u64 cgroup_id(const struct cgroup *cgrp)
*/
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
- return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+ return css->flags & CSS_DYING;
}
static inline void cgroup_get(struct cgroup *cgrp)
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 45b651c05b9c..8adc8e9cb4a7 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -31,6 +31,7 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)
+/* GPIOD_FLAGS_BIT_NONEXCLUSIVE is DEPRECATED, don't use in new code. */
#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
/**
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 1adcba3ddd76..1ef867bb8c44 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -345,7 +345,7 @@ static inline void hrtimer_update_function(struct hrtimer *timer,
if (WARN_ON_ONCE(!function))
return;
#endif
- timer->function = function;
+ ACCESS_PRIVATE(timer, function) = function;
}
/* Forward a hrtimer so it expires after now: */
diff --git a/include/linux/irqchip/irq-davinci-aintc.h b/include/linux/irqchip/irq-davinci-aintc.h
deleted file mode 100644
index ea4e087fac98..000000000000
--- a/include/linux/irqchip/irq-davinci-aintc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019 Texas Instruments
- */
-
-#ifndef _LINUX_IRQ_DAVINCI_AINTC_
-#define _LINUX_IRQ_DAVINCI_AINTC_
-
-#include <linux/ioport.h>
-
-/**
- * struct davinci_aintc_config - configuration data for davinci-aintc driver.
- *
- * @reg: register range to map
- * @num_irqs: number of HW interrupts supported by the controller
- * @prios: an array of size num_irqs containing priority settings for
- * each interrupt
- */
-struct davinci_aintc_config {
- struct resource reg;
- unsigned int num_irqs;
- u8 *prios;
-};
-
-void davinci_aintc_init(const struct davinci_aintc_config *config);
-
-#endif /* _LINUX_IRQ_DAVINCI_AINTC_ */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5438a1b446a6..291d49b9bf05 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2382,7 +2382,7 @@ static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 1e748958dad4..311f145eb4e8 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -67,7 +67,7 @@
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1), \
- __VA_OPT__(SPI_MEM_OP_MAX_FREQ(__VA_ARGS__)))
+ SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0))
#define SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cf3b6445817b..2d11d013cabe 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4429,6 +4429,7 @@ void linkwatch_fire_event(struct net_device *dev);
* pending work list (if queued).
*/
void linkwatch_sync_dev(struct net_device *dev);
+void __linkwatch_sync_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
@@ -4974,6 +4975,7 @@ void dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int netif_set_allmulti(struct net_device *dev, int inc, bool notify);
int dev_set_allmulti(struct net_device *dev, int inc);
+void netif_state_change(struct net_device *dev);
void netdev_state_change(struct net_device *dev);
void __netdev_notify_peers(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5a9bf15d4461..0069ba6866a4 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -823,7 +823,6 @@ struct perf_event {
struct irq_work pending_disable_irq;
struct callback_head pending_task;
unsigned int pending_work;
- struct rcuwait pending_work_wait;
atomic_t event_limit;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index ccaaf4c7d5f6..ea39dd23a197 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -240,6 +240,6 @@ rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group)
return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group);
}
-void netdev_set_operstate(struct net_device *dev, int newstate);
+void netif_set_operstate(struct net_device *dev, int newstate);
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 31248cfdfb23..dcd288fa1bb6 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -775,6 +775,7 @@ struct sctp_transport {
/* Reference counting. */
refcount_t refcnt;
+ __u32 dead:1,
/* RTO-Pending : A flag used to track if one of the DATA
* chunks sent to this address is currently being
* used to compute a RTT. If this flag is 0,
@@ -784,7 +785,7 @@ struct sctp_transport {
* calculation completes (i.e. the DATA chunk
* is SACK'd) clear this flag.
*/
- __u32 rto_pending:1,
+ rto_pending:1,
/*
* hb_sent : a flag that signals that we have a pending
diff --git a/include/net/sock.h b/include/net/sock.h
index 8daf1b3b12c6..694f954258d4 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -339,6 +339,8 @@ struct sk_filter;
* @sk_txtime_unused: unused txtime flags
* @ns_tracker: tracker for netns reference
* @sk_user_frags: xarray of pages the user is holding a reference on.
+ * @sk_owner: reference to the real owner of the socket that calls
+ * sock_lock_init_class_and_name().
*/
struct sock {
/*
@@ -547,6 +549,10 @@ struct sock {
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
struct xarray sk_user_frags;
+
+#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
+ struct module *sk_owner;
+#endif
};
struct sock_bh_locked {
@@ -1583,6 +1589,35 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
sk_mem_reclaim(sk);
}
+#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
+static inline void sk_owner_set(struct sock *sk, struct module *owner)
+{
+ __module_get(owner);
+ sk->sk_owner = owner;
+}
+
+static inline void sk_owner_clear(struct sock *sk)
+{
+ sk->sk_owner = NULL;
+}
+
+static inline void sk_owner_put(struct sock *sk)
+{
+ module_put(sk->sk_owner);
+}
+#else
+static inline void sk_owner_set(struct sock *sk, struct module *owner)
+{
+}
+
+static inline void sk_owner_clear(struct sock *sk)
+{
+}
+
+static inline void sk_owner_put(struct sock *sk)
+{
+}
+#endif
/*
* Macro so as to not evaluate some arguments when
* lockdep is not enabled.
@@ -1592,13 +1627,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
*/
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
do { \
+ sk_owner_set(sk, THIS_MODULE); \
sk->sk_lock.owned = 0; \
init_waitqueue_head(&sk->sk_lock.wq); \
spin_lock_init(&(sk)->sk_lock.slock); \
debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
- sizeof((sk)->sk_lock)); \
+ sizeof((sk)->sk_lock)); \
lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
- (skey), (sname)); \
+ (skey), (sname)); \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
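The sk_owner machinery above pins the module that supplied a socket's lock class, so the class's lockdep key cannot be freed by module unload while a live socket still uses it. A reference-counting sketch of that lifetime rule (userspace stand-ins, not net core code):

#include <stdatomic.h>
#include <stdio.h>

struct module_like { atomic_int refcnt; };

struct sock_like {
	struct module_like *sk_owner;	/* provider of the lock class */
};

static void sk_owner_set(struct sock_like *sk, struct module_like *m)
{
	atomic_fetch_add(&m->refcnt, 1);	/* pin the provider */
	sk->sk_owner = m;
}

static void sk_owner_put(struct sock_like *sk)
{
	if (sk->sk_owner)
		atomic_fetch_sub(&sk->sk_owner->refcnt, 1);
}

int main(void)
{
	struct module_like mod = { 0 };
	struct sock_like sk = { 0 };

	sk_owner_set(&sk, &mod);	/* unload must wait: refcnt > 0 */
	sk_owner_put(&sk);		/* socket gone, module may unload */
	printf("%d\n", atomic_load(&mod.refcnt));
	return 0;
}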
diff --git a/include/vdso/unaligned.h b/include/vdso/unaligned.h
index eee3d2a4dbe4..ff0c06b6513e 100644
--- a/include/vdso/unaligned.h
+++ b/include/vdso/unaligned.h
@@ -2,14 +2,14 @@
#ifndef __VDSO_UNALIGNED_H
#define __VDSO_UNALIGNED_H
-#define __get_unaligned_t(type, ptr) ({ \
- const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x; \
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed * __get_pptr = (typeof(__get_pptr))(ptr); \
+ __get_pptr->x; \
})
-#define __put_unaligned_t(type, val, ptr) do { \
- struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x = (val); \
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed * __put_pptr = (typeof(__put_pptr))(ptr); \
+ __put_pptr->x = (val); \
} while (0)
#endif /* __VDSO_UNALIGNED_H */
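The rename from a shared __pptr to distinct __get_pptr/__put_pptr avoids one macro's local shadowing the other's when the two expand nested, e.g. a __put_unaligned_t() whose value argument is a __get_unaligned_t(). A standalone usage sketch of the get side (GNU C, as in the header itself; __packed spelled out for userspace):

#include <stdint.h>
#include <stdio.h>

#define __get_unaligned_t(type, ptr) ({					\
	const struct { type x; } __attribute__((__packed__)) *__get_pptr = \
		(typeof(__get_pptr))(ptr);				\
	__get_pptr->x;							\
})

int main(void)
{
	unsigned char buf[5] = { 0, 0x78, 0x56, 0x34, 0x12 };
	uint32_t v = __get_unaligned_t(uint32_t, buf + 1);	/* odd address */

	printf("0x%x\n", (unsigned)v);	/* 0x12345678 on little-endian */
	return 0;
}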
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 098109259671..953d5e742569 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -504,6 +504,8 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
p->nbufs = tmp;
p->addr = READ_ONCE(sqe->addr);
p->len = READ_ONCE(sqe->len);
+ if (!p->len)
+ return -EINVAL;
if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
&size))
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 5e64a8bb30a4..b36c8825550e 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -175,6 +175,18 @@ void io_rsrc_cache_free(struct io_ring_ctx *ctx)
io_alloc_cache_free(&ctx->imu_cache, kfree);
}
+static void io_clear_table_tags(struct io_rsrc_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr; i++) {
+ struct io_rsrc_node *node = data->nodes[i];
+
+ if (node)
+ node->tag = 0;
+ }
+}
+
__cold void io_rsrc_data_free(struct io_ring_ctx *ctx,
struct io_rsrc_data *data)
{
@@ -583,6 +595,7 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
io_file_table_set_alloc_range(ctx, 0, ctx->file_table.data.nr);
return 0;
fail:
+ io_clear_table_tags(&ctx->file_table.data);
io_sqe_files_unregister(ctx);
return ret;
}
@@ -902,8 +915,10 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
}
ctx->buf_table = data;
- if (ret)
+ if (ret) {
+ io_clear_table_tags(&ctx->buf_table);
io_sqe_buffers_unregister(ctx);
+ }
return ret;
}
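io_clear_table_tags() exists so a failed registration can fall back to the normal unregister path without that path posting tag CQEs for resources the application never successfully registered: the tags are zeroed first, then the table is torn down. In miniature (invented names):

#include <stdio.h>

struct node { unsigned long tag; };

static void clear_tags(struct node *nodes, int nr)
{
	for (int i = 0; i < nr; i++)
		nodes[i].tag = 0;	/* suppress completion events */
}

static void unregister(struct node *nodes, int nr)
{
	for (int i = 0; i < nr; i++)
		if (nodes[i].tag)
			printf("post CQE for tag %lu\n", nodes[i].tag);
}

int main(void)
{
	struct node nodes[2] = { { .tag = 7 }, { .tag = 9 } };

	clear_tags(nodes, 2);	/* registration failed: roll back quietly */
	unregister(nodes, 2);	/* prints nothing */
	return 0;
}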
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 80d4a6f71d29..0f46e0404c04 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -181,7 +181,7 @@ static void io_zcrx_free_area(struct io_zcrx_area *area)
kvfree(area->nia.niovs);
kvfree(area->user_refs);
if (area->pages) {
- unpin_user_pages(area->pages, area->nia.num_niovs);
+ unpin_user_pages(area->pages, area->nr_folios);
kvfree(area->pages);
}
kfree(area);
@@ -192,7 +192,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
struct io_uring_zcrx_area_reg *area_reg)
{
struct io_zcrx_area *area;
- int i, ret, nr_pages;
+ int i, ret, nr_pages, nr_iovs;
struct iovec iov;
if (area_reg->flags || area_reg->rq_area_token)
@@ -220,27 +220,28 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
area->pages = NULL;
goto err;
}
- area->nia.num_niovs = nr_pages;
+ area->nr_folios = nr_iovs = nr_pages;
+ area->nia.num_niovs = nr_iovs;
- area->nia.niovs = kvmalloc_array(nr_pages, sizeof(area->nia.niovs[0]),
+ area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
GFP_KERNEL | __GFP_ZERO);
if (!area->nia.niovs)
goto err;
- area->freelist = kvmalloc_array(nr_pages, sizeof(area->freelist[0]),
+ area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
GFP_KERNEL | __GFP_ZERO);
if (!area->freelist)
goto err;
- for (i = 0; i < nr_pages; i++)
+ for (i = 0; i < nr_iovs; i++)
area->freelist[i] = i;
- area->user_refs = kvmalloc_array(nr_pages, sizeof(area->user_refs[0]),
+ area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
GFP_KERNEL | __GFP_ZERO);
if (!area->user_refs)
goto err;
- for (i = 0; i < nr_pages; i++) {
+ for (i = 0; i < nr_iovs; i++) {
struct net_iov *niov = &area->nia.niovs[i];
niov->owner = &area->nia;
@@ -248,7 +249,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
atomic_set(&area->user_refs[i], 0);
}
- area->free_count = nr_pages;
+ area->free_count = nr_iovs;
area->ifq = ifq;
/* we're only supporting one area per ifq for now */
area->area_id = 0;
diff --git a/io_uring/zcrx.h b/io_uring/zcrx.h
index 706cc7300780..47f1c0e8c197 100644
--- a/io_uring/zcrx.h
+++ b/io_uring/zcrx.h
@@ -15,6 +15,7 @@ struct io_zcrx_area {
bool is_mapped;
u16 area_id;
struct page **pages;
+ unsigned long nr_folios;
/* freelist */
spinlock_t freelist_lock ____cacheline_aligned_in_smp;
@@ -26,11 +27,11 @@ struct io_zcrx_ifq {
struct io_ring_ctx *ctx;
struct io_zcrx_area *area;
+ spinlock_t rq_lock ____cacheline_aligned_in_smp;
struct io_uring *rq_ring;
struct io_uring_zcrx_rqe *rqes;
- u32 rq_entries;
u32 cached_rq_head;
- spinlock_t rq_lock;
+ u32 rq_entries;
u32 if_rxq;
struct device *dev;
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index d869f51ea93a..9a5f94371e50 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -9,13 +9,14 @@
#include <linux/slab.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"
+#include <asm/rqspinlock.h>
#define QUEUE_STACK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
struct bpf_queue_stack {
struct bpf_map map;
- raw_spinlock_t lock;
+ rqspinlock_t lock;
u32 head, tail;
u32 size; /* max_entries + 1 */
@@ -78,7 +79,7 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
qs->size = size;
- raw_spin_lock_init(&qs->lock);
+ raw_res_spin_lock_init(&qs->lock);
return &qs->map;
}
@@ -98,12 +99,8 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
int err = 0;
void *ptr;
- if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&qs->lock, flags))
- return -EBUSY;
- } else {
- raw_spin_lock_irqsave(&qs->lock, flags);
- }
+ if (raw_res_spin_lock_irqsave(&qs->lock, flags))
+ return -EBUSY;
if (queue_stack_map_is_empty(qs)) {
memset(value, 0, qs->map.value_size);
@@ -120,7 +117,7 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
}
out:
- raw_spin_unlock_irqrestore(&qs->lock, flags);
+ raw_res_spin_unlock_irqrestore(&qs->lock, flags);
return err;
}
@@ -133,12 +130,8 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
void *ptr;
u32 index;
- if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&qs->lock, flags))
- return -EBUSY;
- } else {
- raw_spin_lock_irqsave(&qs->lock, flags);
- }
+ if (raw_res_spin_lock_irqsave(&qs->lock, flags))
+ return -EBUSY;
if (queue_stack_map_is_empty(qs)) {
memset(value, 0, qs->map.value_size);
@@ -157,7 +150,7 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
qs->head = index;
out:
- raw_spin_unlock_irqrestore(&qs->lock, flags);
+ raw_res_spin_unlock_irqrestore(&qs->lock, flags);
return err;
}
@@ -203,12 +196,8 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
if (flags & BPF_NOEXIST || flags > BPF_EXIST)
return -EINVAL;
- if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
- return -EBUSY;
- } else {
- raw_spin_lock_irqsave(&qs->lock, irq_flags);
- }
+ if (raw_res_spin_lock_irqsave(&qs->lock, irq_flags))
+ return -EBUSY;
if (queue_stack_map_is_full(qs)) {
if (!replace) {
@@ -227,7 +216,7 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
qs->head = 0;
out:
- raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
+ raw_res_spin_unlock_irqrestore(&qs->lock, irq_flags);
return err;
}
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 1499d8caa9a3..719d73299397 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -11,6 +11,7 @@
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
+#include <asm/rqspinlock.h>
#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
@@ -29,7 +30,7 @@ struct bpf_ringbuf {
u64 mask;
struct page **pages;
int nr_pages;
- raw_spinlock_t spinlock ____cacheline_aligned_in_smp;
+ rqspinlock_t spinlock ____cacheline_aligned_in_smp;
/* For user-space producer ring buffers, an atomic_t busy bit is used
* to synchronize access to the ring buffers in the kernel, rather than
* the spinlock that is used for kernel-producer ring buffers. This is
@@ -173,7 +174,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
if (!rb)
return NULL;
- raw_spin_lock_init(&rb->spinlock);
+ raw_res_spin_lock_init(&rb->spinlock);
atomic_set(&rb->busy, 0);
init_waitqueue_head(&rb->waitq);
init_irq_work(&rb->work, bpf_ringbuf_notify);
@@ -416,12 +417,8 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
cons_pos = smp_load_acquire(&rb->consumer_pos);
- if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&rb->spinlock, flags))
- return NULL;
- } else {
- raw_spin_lock_irqsave(&rb->spinlock, flags);
- }
+ if (raw_res_spin_lock_irqsave(&rb->spinlock, flags))
+ return NULL;
pend_pos = rb->pending_pos;
prod_pos = rb->producer_pos;
@@ -446,7 +443,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
*/
if (new_prod_pos - cons_pos > rb->mask ||
new_prod_pos - pend_pos > rb->mask) {
- raw_spin_unlock_irqrestore(&rb->spinlock, flags);
+ raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
return NULL;
}
@@ -458,7 +455,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
/* pairs with consumer's smp_load_acquire() */
smp_store_release(&rb->producer_pos, new_prod_pos);
- raw_spin_unlock_irqrestore(&rb->spinlock, flags);
+ raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}
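Both conversions above (the queue/stack maps and the ringbuf) follow the same pattern: the resilient queued spinlock acquires like a raw spinlock but fails with an error instead of deadlocking, so the explicit in_nmi() trylock special case disappears and every context takes one path. A userspace model of an error-returning lock (illustrative only):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Stand-in for a resilient lock: acquisition can fail instead of spinning. */
struct reslock { bool held; };

static int res_lock(struct reslock *l)
{
	if (l->held)
		return -EBUSY;	/* would deadlock: report, don't spin */
	l->held = true;
	return 0;
}

static void res_unlock(struct reslock *l)
{
	l->held = false;
}

static int push(struct reslock *l, int v)
{
	int err = res_lock(l);

	if (err)
		return err;	/* one path for all contexts, NMI included */
	printf("pushed %d\n", v);
	res_unlock(l);
	return 0;
}

int main(void)
{
	struct reslock l = { false };

	return push(&l, 1);
}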
diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
index b896c4a75a5c..338305c8852c 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -253,7 +253,7 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
})
#else
#define RES_CHECK_TIMEOUT(ts, ret, mask) \
- ({ (ret) = check_timeout(&(ts)); })
+ ({ (ret) = check_timeout((lock), (mask), &(ts)); })
#endif
/*
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 27f08aa17b56..3caf2cd86e65 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5923,6 +5923,12 @@ static void kill_css(struct cgroup_subsys_state *css)
if (css->flags & CSS_DYING)
return;
+ /*
+ * Call css_killed(), if defined, before setting the CSS_DYING flag
+ */
+ if (css->ss->css_killed)
+ css->ss->css_killed(css);
+
css->flags |= CSS_DYING;
/*
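Note the pairing with the css_is_dying() change earlier in this merge: css_is_dying() now simply tests CSS_DYING, and kill_css() invokes the new css_killed() callback strictly before setting that flag, so a subsystem's kill handler still observes the css as live. A minimal ordering sketch (invented types, not cgroup core code):

#include <stdio.h>
#include <stdbool.h>

#define CSS_DYING	0x1

struct css {
	unsigned flags;
	void (*css_killed)(struct css *css);
};

static bool css_is_dying(const struct css *css)
{
	return css->flags & CSS_DYING;
}

static void kill_css(struct css *css)
{
	if (css_is_dying(css))
		return;			/* kill is not re-entered */
	if (css->css_killed)
		css->css_killed(css);	/* runs before CSS_DYING is set */
	css->flags |= CSS_DYING;
}

static void on_killed(struct css *css)
{
	printf("killed callback: dying=%d\n", css_is_dying(css));	/* 0 */
}

int main(void)
{
	struct css css = { .css_killed = on_killed };

	kill_css(&css);
	kill_css(&css);		/* second call is a no-op */
	return 0;
}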
diff --git a/kernel/cgroup/cpuset-internal.h b/kernel/cgroup/cpuset-internal.h
index 976a8bc3ff60..383963e28ac6 100644
--- a/kernel/cgroup/cpuset-internal.h
+++ b/kernel/cgroup/cpuset-internal.h
@@ -33,6 +33,7 @@ enum prs_errcode {
PERR_CPUSEMPTY,
PERR_HKEEPING,
PERR_ACCESS,
+ PERR_REMOTE,
};
/* bits in struct cpuset flags field */
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 39c1fc643d77..306b60430091 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -61,10 +61,17 @@ static const char * const perr_strings[] = {
[PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
[PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
[PERR_ACCESS] = "Enable partition not permitted",
+ [PERR_REMOTE] = "Have remote partition underneath",
};
/*
- * Exclusive CPUs distributed out to sub-partitions of top_cpuset
+ * For local partitions, updates to subpartitions_cpus & isolated_cpus are done
+ * in update_parent_effective_cpumask(). For remote partitions, they are done in
+ * the remote_partition_*() and remote_cpus_update() helpers.
+ */
+/*
+ * Exclusive CPUs distributed out to local or remote sub-partitions of
+ * top_cpuset
*/
static cpumask_var_t subpartitions_cpus;
@@ -86,7 +93,6 @@ static struct list_head remote_children;
* A flag to force sched domain rebuild at the end of an operation.
* It can be set in
* - update_partition_sd_lb()
- * - remote_partition_check()
* - update_cpumasks_hier()
* - cpuset_update_flag()
* - cpuset_hotplug_update_tasks()
@@ -1089,9 +1095,14 @@ void cpuset_reset_sched_domains(void)
*
* Iterate through each task of @cs updating its cpus_allowed to the
* effective cpuset's. As this function is called with cpuset_mutex held,
- * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
- * is used instead of effective_cpus to make sure all offline CPUs are also
- * included as hotplug code won't update cpumasks for tasks in top_cpuset.
+ * cpuset membership stays stable.
+ *
+ * For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
+ * to make sure all offline CPUs are also included as hotplug code won't
+ * update cpumasks for tasks in top_cpuset.
+ *
+ * As task_cpu_possible_mask() can be task-dependent on arm64, we have to
+ * do the CPU masking per task instead of once for all tasks.
*/
void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
{
@@ -1151,7 +1162,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
*
* Return: 0 if successful, an error code otherwise
*/
-static int update_partition_exclusive(struct cpuset *cs, int new_prs)
+static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
{
bool exclusive = (new_prs > PRS_MEMBER);
@@ -1234,12 +1245,12 @@ static void reset_partition_data(struct cpuset *cs)
}
/*
- * partition_xcpus_newstate - Exclusive CPUs state change
+ * isolated_cpus_update - Update the isolated_cpus mask
* @old_prs: old partition_root_state
* @new_prs: new partition_root_state
* @xcpus: exclusive CPUs with state change
*/
-static void partition_xcpus_newstate(int old_prs, int new_prs, struct cpumask *xcpus)
+static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
{
WARN_ON_ONCE(old_prs == new_prs);
if (new_prs == PRS_ISOLATED)
@@ -1273,8 +1284,8 @@ static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
isolcpus_updated = (new_prs != parent->partition_root_state);
if (isolcpus_updated)
- partition_xcpus_newstate(parent->partition_root_state, new_prs,
- xcpus);
+ isolated_cpus_update(parent->partition_root_state, new_prs,
+ xcpus);
cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
return isolcpus_updated;
@@ -1304,8 +1315,8 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
isolcpus_updated = (old_prs != parent->partition_root_state);
if (isolcpus_updated)
- partition_xcpus_newstate(old_prs, parent->partition_root_state,
- xcpus);
+ isolated_cpus_update(old_prs, parent->partition_root_state,
+ xcpus);
cpumask_and(xcpus, xcpus, cpu_active_mask);
cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
@@ -1340,20 +1351,57 @@ EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated);
* compute_effective_exclusive_cpumask - compute effective exclusive CPUs
* @cs: cpuset
* @xcpus: effective exclusive CPUs value to be set
- * Return: true if xcpus is not empty, false otherwise.
+ * @real_cs: the real cpuset (can be NULL)
+ * Return: 0 if there is no sibling conflict, > 0 otherwise
*
- * Starting with exclusive_cpus (cpus_allowed if exclusive_cpus is not set),
- * it must be a subset of parent's effective_xcpus.
+ * If exclusive_cpus isn't explicitly set or a real_cs is provided, we have to
+ * scan the sibling cpusets and exclude their exclusive_cpus or effective_xcpus
+ * as well. A non-NULL real_cs means that a cpumask is being changed and
+ * the given cs is a trial one.
*/
-static bool compute_effective_exclusive_cpumask(struct cpuset *cs,
- struct cpumask *xcpus)
+static int compute_effective_exclusive_cpumask(struct cpuset *cs,
+ struct cpumask *xcpus,
+ struct cpuset *real_cs)
{
+ struct cgroup_subsys_state *css;
struct cpuset *parent = parent_cs(cs);
+ struct cpuset *sibling;
+ int retval = 0;
if (!xcpus)
xcpus = cs->effective_xcpus;
- return cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus);
+ cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus);
+
+ if (!real_cs) {
+ if (!cpumask_empty(cs->exclusive_cpus))
+ return 0;
+ } else {
+ cs = real_cs;
+ }
+
+ /*
+ * Exclude exclusive CPUs from siblings
+ */
+ rcu_read_lock();
+ cpuset_for_each_child(sibling, css, parent) {
+ if (sibling == cs)
+ continue;
+
+ if (!cpumask_empty(sibling->exclusive_cpus) &&
+ cpumask_intersects(xcpus, sibling->exclusive_cpus)) {
+ cpumask_andnot(xcpus, xcpus, sibling->exclusive_cpus);
+ retval++;
+ continue;
+ }
+ if (!cpumask_empty(sibling->effective_xcpus) &&
+ cpumask_intersects(xcpus, sibling->effective_xcpus)) {
+ cpumask_andnot(xcpus, xcpus, sibling->effective_xcpus);
+ retval++;
+ }
+ }
+ rcu_read_unlock();
+ return retval;
}
static inline bool is_remote_partition(struct cpuset *cs)
@@ -1395,7 +1443,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
* remote partition root underneath it, its exclusive_cpus must
* have overlapped with subpartitions_cpus.
*/
- compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
+ compute_effective_exclusive_cpumask(cs, tmp->new_cpus, NULL);
if (cpumask_empty(tmp->new_cpus) ||
cpumask_intersects(tmp->new_cpus, subpartitions_cpus) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
@@ -1404,8 +1452,11 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
spin_lock_irq(&callback_lock);
isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
list_add(&cs->remote_sibling, &remote_children);
+ cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
spin_unlock_irq(&callback_lock);
update_unbound_workqueue_cpumask(isolcpus_updated);
+ cpuset_force_rebuild();
+ cs->prs_err = 0;
/*
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1428,20 +1479,24 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
{
bool isolcpus_updated;
- compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
WARN_ON_ONCE(!is_remote_partition(cs));
- WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, subpartitions_cpus));
+ WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
spin_lock_irq(&callback_lock);
list_del_init(&cs->remote_sibling);
isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
- NULL, tmp->new_cpus);
- cs->partition_root_state = -cs->partition_root_state;
- if (!cs->prs_err)
- cs->prs_err = PERR_INVCPUS;
+ NULL, cs->effective_xcpus);
+ if (cs->prs_err)
+ cs->partition_root_state = -cs->partition_root_state;
+ else
+ cs->partition_root_state = PRS_MEMBER;
+
+ /* effective_xcpus may need to be changed */
+ compute_effective_exclusive_cpumask(cs, NULL, NULL);
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
update_unbound_workqueue_cpumask(isolcpus_updated);
+ cpuset_force_rebuild();
/*
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1453,14 +1508,15 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
/*
* remote_cpus_update - cpus_exclusive change of remote partition
* @cs: the cpuset to be updated
- * @newmask: the new effective_xcpus mask
+ * @xcpus: the new exclusive_cpus mask, if non-NULL
+ * @excpus: the new effective_xcpus mask
* @tmp: temporary masks
*
* top_cpuset and subpartitions_cpus will be updated or partition can be
* invalidated.
*/
-static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
- struct tmpmasks *tmp)
+static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
+ struct cpumask *excpus, struct tmpmasks *tmp)
{
bool adding, deleting;
int prs = cs->partition_root_state;
@@ -1471,29 +1527,45 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
- if (cpumask_empty(newmask))
+ if (cpumask_empty(excpus)) {
+ cs->prs_err = PERR_CPUSEMPTY;
goto invalidate;
+ }
- adding = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus);
- deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask);
+ adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus);
+ deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus);
/*
	 * Additions of remote CPUs are only allowed if those CPUs are
* not allocated to other partitions and there are effective_cpus
* left in the top cpuset.
*/
- if (adding && (!capable(CAP_SYS_ADMIN) ||
- cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
- cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)))
- goto invalidate;
+ if (adding) {
+ if (!capable(CAP_SYS_ADMIN))
+ cs->prs_err = PERR_ACCESS;
+ else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
+ cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
+ cs->prs_err = PERR_NOCPUS;
+ if (cs->prs_err)
+ goto invalidate;
+ }
spin_lock_irq(&callback_lock);
if (adding)
isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask);
if (deleting)
isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
+ /*
+ * Need to update effective_xcpus and exclusive_cpus now as
+ * update_sibling_cpumasks() below may iterate back to the same cs.
+ */
+ cpumask_copy(cs->effective_xcpus, excpus);
+ if (xcpus)
+ cpumask_copy(cs->exclusive_cpus, xcpus);
spin_unlock_irq(&callback_lock);
update_unbound_workqueue_cpumask(isolcpus_updated);
+ if (adding || deleting)
+ cpuset_force_rebuild();
/*
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1507,47 +1579,6 @@ invalidate:
}
/*
- * remote_partition_check - check if a child remote partition needs update
- * @cs: the cpuset to be updated
- * @newmask: the new effective_xcpus mask
- * @delmask: temporary mask for deletion (not in tmp)
- * @tmp: temporary masks
- *
- * This should be called before the given cs has updated its cpus_allowed
- * and/or effective_xcpus.
- */
-static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask,
- struct cpumask *delmask, struct tmpmasks *tmp)
-{
- struct cpuset *child, *next;
- int disable_cnt = 0;
-
- /*
- * Compute the effective exclusive CPUs that will be deleted.
- */
- if (!cpumask_andnot(delmask, cs->effective_xcpus, newmask) ||
- !cpumask_intersects(delmask, subpartitions_cpus))
- return; /* No deletion of exclusive CPUs in partitions */
-
- /*
- * Searching the remote children list to look for those that will
- * be impacted by the deletion of exclusive CPUs.
- *
- * Since a cpuset must be removed from the remote children list
- * before it can go offline and holding cpuset_mutex will prevent
- * any change in cpuset status. RCU read lock isn't needed.
- */
- lockdep_assert_held(&cpuset_mutex);
- list_for_each_entry_safe(child, next, &remote_children, remote_sibling)
- if (cpumask_intersects(child->effective_cpus, delmask)) {
- remote_partition_disable(child, tmp);
- disable_cnt++;
- }
- if (disable_cnt)
- cpuset_force_rebuild();
-}
-
-/*
* prstate_housekeeping_conflict - check for partition & housekeeping conflicts
* @prstate: partition root state to be checked
* @new_cpus: cpu mask
@@ -1601,7 +1632,7 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
* The partcmd_update command is used by update_cpumasks_hier() with newmask
* NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
* by update_cpumask() with NULL newmask. In both cases, the callers won't
- * check for error and so partition_root_state and prs_error will be updated
+ * check for error and so partition_root_state and prs_err will be updated
* directly.
*/
static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
@@ -1614,11 +1645,12 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
int old_prs, new_prs;
int part_error = PERR_NONE; /* Partition error? */
int subparts_delta = 0;
- struct cpumask *xcpus; /* cs effective_xcpus */
int isolcpus_updated = 0;
+ struct cpumask *xcpus = user_xcpus(cs);
bool nocpu;
lockdep_assert_held(&cpuset_mutex);
+ WARN_ON_ONCE(is_remote_partition(cs));
/*
* new_prs will only be changed for the partcmd_update and
@@ -1626,7 +1658,6 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
*/
adding = deleting = false;
old_prs = new_prs = cs->partition_root_state;
- xcpus = user_xcpus(cs);
if (cmd == partcmd_invalidate) {
if (is_prs_invalid(old_prs))
@@ -1661,12 +1692,19 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
/*
+ * Need to call compute_effective_exclusive_cpumask() in case
+	 * exclusive_cpus is not set. A sibling conflict should only happen
+ * if exclusive_cpus isn't set.
+ */
+ xcpus = tmp->new_cpus;
+ if (compute_effective_exclusive_cpumask(cs, xcpus, NULL))
+ WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
+
+ /*
* Enabling partition root is not allowed if its
- * effective_xcpus is empty or doesn't overlap with
- * parent's effective_xcpus.
+ * effective_xcpus is empty.
*/
- if (cpumask_empty(xcpus) ||
- !cpumask_intersects(xcpus, parent->effective_xcpus))
+ if (cpumask_empty(xcpus))
return PERR_INVCPUS;
if (prstate_housekeeping_conflict(new_prs, xcpus))
@@ -1679,19 +1717,22 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
if (nocpu)
return PERR_NOCPUS;
- cpumask_copy(tmp->delmask, xcpus);
- deleting = true;
- subparts_delta++;
+ deleting = cpumask_and(tmp->delmask, xcpus, parent->effective_xcpus);
+ if (deleting)
+ subparts_delta++;
new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
} else if (cmd == partcmd_disable) {
/*
- * May need to add cpus to parent's effective_cpus for
- * valid partition root.
+ * May need to add cpus back to parent's effective_cpus
+	 * (and maybe remove them from subpartitions_cpus/isolated_cpus)
+	 * for a valid partition root. xcpus may contain CPUs that
+ * shouldn't be removed from the two global cpumasks.
*/
- adding = !is_prs_invalid(old_prs) &&
- cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus);
- if (adding)
+ if (is_partition_valid(cs)) {
+ cpumask_copy(tmp->addmask, cs->effective_xcpus);
+ adding = true;
subparts_delta--;
+ }
new_prs = PRS_MEMBER;
} else if (newmask) {
/*
@@ -1701,6 +1742,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
part_error = PERR_CPUSEMPTY;
goto write_error;
}
+
/* Check newmask again, whether cpus are available for parent/cs */
nocpu |= tasks_nocpu_error(parent, cs, newmask);
@@ -1829,7 +1871,7 @@ write_error:
* CPU lists in cs haven't been updated yet. So defer it to later.
*/
if ((old_prs != new_prs) && (cmd != partcmd_update)) {
- int err = update_partition_exclusive(cs, new_prs);
+ int err = update_partition_exclusive_flag(cs, new_prs);
if (err)
return err;
@@ -1867,7 +1909,7 @@ write_error:
update_unbound_workqueue_cpumask(isolcpus_updated);
if ((old_prs != new_prs) && (cmd == partcmd_update))
- update_partition_exclusive(cs, new_prs);
+ update_partition_exclusive_flag(cs, new_prs);
if (adding || deleting) {
cpuset_update_tasks_cpumask(parent, tmp->addmask);
@@ -1917,7 +1959,7 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
* 2) All the effective_cpus will be used up and cp
* has tasks
*/
- compute_effective_exclusive_cpumask(cs, new_ecpus);
+ compute_effective_exclusive_cpumask(cs, new_ecpus, NULL);
cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
rcu_read_lock();
@@ -1925,6 +1967,11 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
if (!is_partition_valid(child))
continue;
+ /*
+ * There shouldn't be a remote partition underneath another
+ * partition root.
+ */
+ WARN_ON_ONCE(is_remote_partition(child));
child->prs_err = 0;
if (!cpumask_subset(child->effective_xcpus,
cs->effective_xcpus))
@@ -1980,32 +2027,39 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
bool remote = is_remote_partition(cp);
bool update_parent = false;
+ old_prs = new_prs = cp->partition_root_state;
+
/*
- * Skip descendent remote partition that acquires CPUs
- * directly from top cpuset unless it is cs.
+ * For child remote partition root (!= cs), we need to call
+ * remote_cpus_update() if effective_xcpus will be changed.
+ * Otherwise, we can skip the whole subtree.
+ *
+ * remote_cpus_update() will reuse tmp->new_cpus only after
+	 * its value has been processed.
*/
if (remote && (cp != cs)) {
- pos_css = css_rightmost_descendant(pos_css);
- continue;
- }
+ compute_effective_exclusive_cpumask(cp, tmp->new_cpus, NULL);
+ if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) {
+ pos_css = css_rightmost_descendant(pos_css);
+ continue;
+ }
+ rcu_read_unlock();
+ remote_cpus_update(cp, NULL, tmp->new_cpus, tmp);
+ rcu_read_lock();
- /*
- * Update effective_xcpus if exclusive_cpus set.
- * The case when exclusive_cpus isn't set is handled later.
- */
- if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) {
- spin_lock_irq(&callback_lock);
- compute_effective_exclusive_cpumask(cp, NULL);
- spin_unlock_irq(&callback_lock);
+ /* Remote partition may be invalidated */
+ new_prs = cp->partition_root_state;
+ remote = (new_prs == old_prs);
}
- old_prs = new_prs = cp->partition_root_state;
- if (remote || (is_partition_valid(parent) &&
- is_partition_valid(cp)))
+ if (remote || (is_partition_valid(parent) && is_partition_valid(cp)))
compute_partition_effective_cpumask(cp, tmp->new_cpus);
else
compute_effective_cpumask(tmp->new_cpus, cp, parent);
+ if (remote)
+ goto get_css; /* Ready to update cpuset data */
+
/*
* A partition with no effective_cpus is allowed as long as
* there is no task associated with it. Call
@@ -2025,9 +2079,6 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
cpumask_copy(tmp->new_cpus, parent->effective_cpus);
- if (remote)
- goto get_css;
-
/*
* Skip the whole subtree if
* 1) the cpumask remains the same,
@@ -2088,6 +2139,9 @@ get_css:
spin_lock_irq(&callback_lock);
cpumask_copy(cp->effective_cpus, tmp->new_cpus);
cp->partition_root_state = new_prs;
+ if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs))
+ compute_effective_exclusive_cpumask(cp, NULL, NULL);
+
/*
* Make sure effective_xcpus is properly set for a valid
* partition root.
@@ -2174,7 +2228,14 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
parent);
if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
continue;
+ } else if (is_remote_partition(sibling)) {
+ /*
+		 * A change in a sibling cpuset won't affect a remote
+ * partition root.
+ */
+ continue;
}
+
if (!css_tryget_online(&sibling->css))
continue;
@@ -2231,8 +2292,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
* trialcs->effective_xcpus is used as a temporary cpumask
* for checking validity of the partition root.
*/
+ trialcs->partition_root_state = PRS_MEMBER;
if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs))
- compute_effective_exclusive_cpumask(trialcs, NULL);
+ compute_effective_exclusive_cpumask(trialcs, NULL, cs);
}
/* Nothing to do if the cpus didn't change */
@@ -2305,19 +2367,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
* Call remote_cpus_update() to handle valid remote partition
*/
if (is_remote_partition(cs))
- remote_cpus_update(cs, xcpus, &tmp);
+ remote_cpus_update(cs, NULL, xcpus, &tmp);
else if (invalidate)
update_parent_effective_cpumask(cs, partcmd_invalidate,
NULL, &tmp);
else
update_parent_effective_cpumask(cs, partcmd_update,
xcpus, &tmp);
- } else if (!cpumask_empty(cs->exclusive_cpus)) {
- /*
- * Use trialcs->effective_cpus as a temp cpumask
- */
- remote_partition_check(cs, trialcs->effective_xcpus,
- trialcs->effective_cpus, &tmp);
}
spin_lock_irq(&callback_lock);
@@ -2369,8 +2425,15 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
return 0;
- if (*buf)
- compute_effective_exclusive_cpumask(trialcs, NULL);
+ if (*buf) {
+ trialcs->partition_root_state = PRS_MEMBER;
+ /*
+		 * Reject the change if the exclusive CPUs conflict with
+		 * those of the siblings.
+ */
+ if (compute_effective_exclusive_cpumask(trialcs, NULL, cs))
+ return -EINVAL;
+ }
/*
* Check all the descendants in update_cpumasks_hier() if
@@ -2401,8 +2464,8 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (invalidate)
remote_partition_disable(cs, &tmp);
else
- remote_cpus_update(cs, trialcs->effective_xcpus,
- &tmp);
+ remote_cpus_update(cs, trialcs->exclusive_cpus,
+ trialcs->effective_xcpus, &tmp);
} else if (invalidate) {
update_parent_effective_cpumask(cs, partcmd_invalidate,
NULL, &tmp);
@@ -2410,12 +2473,6 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
update_parent_effective_cpumask(cs, partcmd_update,
trialcs->effective_xcpus, &tmp);
}
- } else if (!cpumask_empty(trialcs->exclusive_cpus)) {
- /*
- * Use trialcs->effective_cpus as a temp cpumask
- */
- remote_partition_check(cs, trialcs->effective_xcpus,
- trialcs->effective_cpus, &tmp);
}
spin_lock_irq(&callback_lock);
cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
@@ -2782,7 +2839,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
int err = PERR_NONE, old_prs = cs->partition_root_state;
struct cpuset *parent = parent_cs(cs);
struct tmpmasks tmpmask;
- bool new_xcpus_state = false;
+ bool isolcpus_updated = false;
if (old_prs == new_prs)
return 0;
@@ -2796,18 +2853,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
if (alloc_cpumasks(NULL, &tmpmask))
return -ENOMEM;
- /*
- * Setup effective_xcpus if not properly set yet, it will be cleared
- * later if partition becomes invalid.
- */
- if ((new_prs > 0) && cpumask_empty(cs->exclusive_cpus)) {
- spin_lock_irq(&callback_lock);
- cpumask_and(cs->effective_xcpus,
- cs->cpus_allowed, parent->effective_xcpus);
- spin_unlock_irq(&callback_lock);
- }
-
- err = update_partition_exclusive(cs, new_prs);
+ err = update_partition_exclusive_flag(cs, new_prs);
if (err)
goto out;
@@ -2821,6 +2867,19 @@ static int update_prstate(struct cpuset *cs, int new_prs)
}
/*
+ * We don't support the creation of a new local partition with
+ * a remote partition underneath it. This unsupported
+	 * setting can happen only if the parent is the top_cpuset because
+ * a remote partition cannot be created underneath an existing
+ * local or remote partition.
+ */
+ if ((parent == &top_cpuset) &&
+ cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
+ err = PERR_REMOTE;
+ goto out;
+ }
+
+ /*
	 * If the parent is a valid partition, enable a local partition.
* Otherwise, enable a remote partition.
*/
@@ -2835,8 +2894,9 @@ static int update_prstate(struct cpuset *cs, int new_prs)
} else if (old_prs && new_prs) {
/*
* A change in load balance state only, no change in cpumasks.
+ * Need to update isolated_cpus.
*/
- new_xcpus_state = true;
+ isolcpus_updated = true;
} else {
/*
* Switching back to member is always allowed even if it
@@ -2860,7 +2920,7 @@ out:
*/
if (err) {
new_prs = -new_prs;
- update_partition_exclusive(cs, new_prs);
+ update_partition_exclusive_flag(cs, new_prs);
}
spin_lock_irq(&callback_lock);
@@ -2868,14 +2928,18 @@ out:
WRITE_ONCE(cs->prs_err, err);
if (!is_partition_valid(cs))
reset_partition_data(cs);
- else if (new_xcpus_state)
- partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus);
+ else if (isolcpus_updated)
+ isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(new_xcpus_state);
+ update_unbound_workqueue_cpumask(isolcpus_updated);
- /* Force update if switching back to member */
+ /* Force update if switching back to member & update effective_xcpus */
update_cpumasks_hier(cs, &tmpmask, !new_prs);
+ /* A newly created partition must have effective_xcpus set */
+ WARN_ON_ONCE(!old_prs && (new_prs > 0)
+ && cpumask_empty(cs->effective_xcpus));
+
/* Update sched domains and load balance flag */
update_partition_sd_lb(cs, old_prs);
@@ -3208,7 +3272,7 @@ int cpuset_common_seq_show(struct seq_file *sf, void *v)
return ret;
}
-static int sched_partition_show(struct seq_file *seq, void *v)
+static int cpuset_partition_show(struct seq_file *seq, void *v)
{
struct cpuset *cs = css_cs(seq_css(seq));
const char *err, *type = NULL;
@@ -3239,7 +3303,7 @@ static int sched_partition_show(struct seq_file *seq, void *v)
return 0;
}
-static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
+static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct cpuset *cs = css_cs(of_css(of));
@@ -3260,11 +3324,8 @@ static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
css_get(&cs->css);
cpus_read_lock();
mutex_lock(&cpuset_mutex);
- if (!is_cpuset_online(cs))
- goto out_unlock;
-
- retval = update_prstate(cs, val);
-out_unlock:
+ if (is_cpuset_online(cs))
+ retval = update_prstate(cs, val);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
css_put(&cs->css);
@@ -3308,8 +3369,8 @@ static struct cftype dfl_files[] = {
{
.name = "cpus.partition",
- .seq_show = sched_partition_show,
- .write = sched_partition_write,
+ .seq_show = cpuset_partition_show,
+ .write = cpuset_partition_write,
.private = FILE_PARTITION_ROOT,
.flags = CFTYPE_NOT_ON_ROOT,
.file_offset = offsetof(struct cpuset, partition_file),
@@ -3475,9 +3536,6 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
cpus_read_lock();
mutex_lock(&cpuset_mutex);
- if (is_partition_valid(cs))
- update_prstate(cs, 0);
-
if (!cpuset_v2() && is_sched_load_balance(cs))
cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
@@ -3488,6 +3546,22 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
cpus_read_unlock();
}
+static void cpuset_css_killed(struct cgroup_subsys_state *css)
+{
+ struct cpuset *cs = css_cs(css);
+
+ cpus_read_lock();
+ mutex_lock(&cpuset_mutex);
+
+ /* Reset valid partition back to member */
+ if (is_partition_valid(cs))
+ update_prstate(cs, PRS_MEMBER);
+
+ mutex_unlock(&cpuset_mutex);
+ cpus_read_unlock();
+
+}
+
static void cpuset_css_free(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
@@ -3609,6 +3683,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
.css_alloc = cpuset_css_alloc,
.css_online = cpuset_css_online,
.css_offline = cpuset_css_offline,
+ .css_killed = cpuset_css_killed,
.css_free = cpuset_css_free,
.can_attach = cpuset_can_attach,
.cancel_attach = cpuset_cancel_attach,
@@ -3739,10 +3814,10 @@ retry:
if (remote && cpumask_empty(&new_cpus) &&
partition_is_populated(cs, NULL)) {
+ cs->prs_err = PERR_HOTPLUG;
remote_partition_disable(cs, tmp);
compute_effective_cpumask(&new_cpus, cs, parent);
remote = false;
- cpuset_force_rebuild();
}
/*
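
Taken together, the cpuset changes above centralize exclusive-CPU arbitration in
compute_effective_exclusive_cpumask(): start from the user-requested mask clipped
to the parent's grant, then subtract every sibling's exclusive CPUs, counting
conflicts. A minimal userspace sketch of that arbitration, using a plain 64-bit
mask as a hypothetical stand-in for the kernel's struct cpumask API:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpumask_t;	/* one bit per CPU; stand-in for struct cpumask */

/*
 * Clip @requested to what @parent can grant, then drop any CPUs claimed
 * exclusively by a sibling. Returns the number of conflicting siblings,
 * mirroring the "0 if no sibling conflict, > 0 otherwise" convention.
 */
static int effective_exclusive(cpumask_t requested, cpumask_t parent,
			       const cpumask_t *siblings, int nsiblings,
			       cpumask_t *out)
{
	int conflicts = 0;

	*out = requested & parent;
	for (int i = 0; i < nsiblings; i++) {
		if (*out & siblings[i]) {
			*out &= ~siblings[i];	/* the sibling keeps its CPUs */
			conflicts++;
		}
	}
	return conflicts;
}

int main(void)
{
	cpumask_t sib[] = { 0x0c };	/* a sibling owns CPUs 2-3 */
	cpumask_t eff;
	int n = effective_exclusive(0x0f, 0xff, sib, 1, &eff);

	printf("effective=%#llx conflicts=%d\n",
	       (unsigned long long)eff, n);	/* effective=0x3 conflicts=1 */
	return 0;
}

The "sibling wins" rule is why update_exclusive_cpumask() above can reject a
conflicting write with -EINVAL instead of silently shrinking the new mask.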
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 4bb587d5d34f..b2239156b7de 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -318,10 +318,11 @@ __bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
might_sleep();
for_each_possible_cpu(cpu) {
- struct cgroup *pos = cgroup_rstat_updated_list(cgrp, cpu);
+ struct cgroup *pos;
/* Reacquire for each CPU to avoid disabling IRQs too long */
__cgroup_rstat_lock(cgrp, cpu);
+ pos = cgroup_rstat_updated_list(cgrp, cpu);
for (; pos; pos = pos->rstat_flush_next) {
struct cgroup_subsys_state *css;
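
The rstat fix above is a classic lock-ordering correction: the per-CPU updated
list must be snapshotted under the same lock that serializes flushing, or a
concurrent updater can splice nodes in between the fetch and the lock. A small
userspace sketch of the corrected shape, with a pthread mutex as a hypothetical
stand-in for the per-CPU rstat lock:

#include <pthread.h>
#include <stdio.h>

struct node { int value; struct node *next; };

static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *updated_head;	/* per-CPU "updated" list */

static void mark_updated(struct node *n)
{
	pthread_mutex_lock(&cpu_lock);
	n->next = updated_head;
	updated_head = n;
	pthread_mutex_unlock(&cpu_lock);
}

static void flush_one_cpu(void)
{
	struct node *pos;

	pthread_mutex_lock(&cpu_lock);
	/*
	 * Snapshot the list only after taking the lock; fetching it first
	 * would race with mark_updated() on another thread.
	 */
	pos = updated_head;
	updated_head = NULL;
	for (; pos; pos = pos->next)
		printf("flush %d\n", pos->value);
	pthread_mutex_unlock(&cpu_lock);
}

int main(void)
{
	struct node a = { .value = 1 }, b = { .value = 2 };

	mark_updated(&a);
	mark_updated(&b);
	flush_one_cpu();
	return 0;
}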
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 128db74e9eab..e93c19565914 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5518,30 +5518,6 @@ static bool exclusive_event_installable(struct perf_event *event,
static void perf_free_addr_filters(struct perf_event *event);
-static void perf_pending_task_sync(struct perf_event *event)
-{
- struct callback_head *head = &event->pending_task;
-
- if (!event->pending_work)
- return;
- /*
- * If the task is queued to the current task's queue, we
- * obviously can't wait for it to complete. Simply cancel it.
- */
- if (task_work_cancel(current, head)) {
- event->pending_work = 0;
- local_dec(&event->ctx->nr_no_switch_fast);
- return;
- }
-
- /*
- * All accesses related to the event are within the same RCU section in
- * perf_pending_task(). The RCU grace period before the event is freed
- * will make sure all those accesses are complete by then.
- */
- rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
-}
-
/* vs perf_event_alloc() error */
static void __free_event(struct perf_event *event)
{
@@ -5599,7 +5575,6 @@ static void _free_event(struct perf_event *event)
{
irq_work_sync(&event->pending_irq);
irq_work_sync(&event->pending_disable_irq);
- perf_pending_task_sync(event);
unaccount_event(event);
@@ -5692,10 +5667,17 @@ static void perf_remove_from_owner(struct perf_event *event)
static void put_event(struct perf_event *event)
{
+ struct perf_event *parent;
+
if (!atomic_long_dec_and_test(&event->refcount))
return;
+ parent = event->parent;
_free_event(event);
+
+ /* Matches the refcount bump in inherit_event() */
+ if (parent)
+ put_event(parent);
}
/*
@@ -5779,11 +5761,6 @@ again:
if (tmp == child) {
perf_remove_from_context(child, DETACH_GROUP);
list_move(&child->child_list, &free_list);
- /*
- * This matches the refcount bump in inherit_event();
- * this can't be the last reference.
- */
- put_event(event);
} else {
var = &ctx->refcount;
}
@@ -5809,7 +5786,8 @@ again:
void *var = &child->ctx->refcount;
list_del(&child->child_list);
- free_event(child);
+ /* Last reference unless ->pending_task work is pending */
+ put_event(child);
/*
* Wake any perf_event_free_task() waiting for this event to be
@@ -5820,7 +5798,11 @@ again:
}
no_ctx:
- put_event(event); /* Must be the 'last' reference */
+ /*
+ * Last reference unless ->pending_task work is pending on this event
+ * or any of its children.
+ */
+ put_event(event);
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
@@ -7236,12 +7218,6 @@ static void perf_pending_task(struct callback_head *head)
int rctx;
/*
- * All accesses to the event must belong to the same implicit RCU read-side
- * critical section as the ->pending_work reset. See comment in
- * perf_pending_task_sync().
- */
- rcu_read_lock();
- /*
* If we 'fail' here, that's OK, it means recursion is already disabled
* and we won't recurse 'further'.
*/
@@ -7251,9 +7227,8 @@ static void perf_pending_task(struct callback_head *head)
event->pending_work = 0;
perf_sigtrap(event);
local_dec(&event->ctx->nr_no_switch_fast);
- rcuwait_wake_up(&event->pending_work_wait);
}
- rcu_read_unlock();
+ put_event(event);
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
@@ -10248,6 +10223,7 @@ static int __perf_event_overflow(struct perf_event *event,
!task_work_add(current, &event->pending_task, notify_mode)) {
event->pending_work = pending_id;
local_inc(&event->ctx->nr_no_switch_fast);
+ WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
event->pending_addr = 0;
if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
@@ -12610,7 +12586,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
init_irq_work(&event->pending_irq, perf_pending_irq);
event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable);
init_task_work(&event->pending_task, perf_pending_task);
- rcuwait_init(&event->pending_work_wait);
mutex_init(&event->mmap_mutex);
raw_spin_lock_init(&event->addr_filters.lock);
@@ -13747,8 +13722,7 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
* Kick perf_poll() for is_event_hup();
*/
perf_event_wakeup(parent_event);
- free_event(event);
- put_event(parent_event);
+ put_event(event);
return;
}
@@ -13872,13 +13846,11 @@ static void perf_free_event(struct perf_event *event,
list_del_init(&event->child_list);
mutex_unlock(&parent->child_mutex);
- put_event(parent);
-
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
- free_event(event);
+ put_event(event);
}
/*
@@ -14016,6 +13988,9 @@ inherit_event(struct perf_event *parent_event,
if (IS_ERR(child_event))
return child_event;
+ get_ctx(child_ctx);
+ child_event->ctx = child_ctx;
+
pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
if (IS_ERR(pmu_ctx)) {
free_event(child_event);
@@ -14037,8 +14012,6 @@ inherit_event(struct perf_event *parent_event,
return NULL;
}
- get_ctx(child_ctx);
-
/*
* Make the child state follow the state of the parent event,
* not its attr.disabled bit. We hold the parent's mutex,
@@ -14059,7 +14032,6 @@ inherit_event(struct perf_event *parent_event,
local64_set(&hwc->period_left, sample_period);
}
- child_event->ctx = child_ctx;
child_event->overflow_handler = parent_event->overflow_handler;
child_event->overflow_handler_context
= parent_event->overflow_handler_context;
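
The perf changes above funnel every teardown path through put_event(), so the
reference a child takes on its parent in inherit_event() is dropped exactly once,
after the child itself is gone. A hedged userspace sketch of that parent/child
refcount discipline (generic names, not the perf API; the iterative loop stands
in for put_event() calling itself on the parent):

#include <stdatomic.h>
#include <stdlib.h>

struct event {
	atomic_long refcount;
	struct event *parent;	/* a child pins its parent */
};

static struct event *event_alloc(struct event *parent)
{
	struct event *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	atomic_init(&e->refcount, 1);
	if (parent)		/* matched by the drop in put_event() */
		atomic_fetch_add(&parent->refcount, 1);
	e->parent = parent;
	return e;
}

/* Drop one reference; on the last one, free and release the parent. */
static void put_event(struct event *e)
{
	while (e) {
		struct event *parent;

		if (atomic_fetch_sub(&e->refcount, 1) != 1)
			return;	/* not the last reference */
		parent = e->parent;
		free(e);
		e = parent;	/* iterate instead of recursing */
	}
}

int main(void)
{
	struct event *p = event_alloc(NULL);
	struct event *c = event_alloc(p);

	put_event(p);	/* parent survives: the child still holds a ref */
	put_event(c);	/* frees the child, then the parent */
	return 0;
}

This pairing is also what lets perf_pending_task() above hold its own reference
across the deferred work instead of the removed rcuwait handshake.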
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 615b4e6d22c7..8d783b5882b6 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1956,6 +1956,9 @@ static void free_ret_instance(struct uprobe_task *utask,
* to-be-reused return instances for future uretprobes. If ri_timer()
* happens to be running right now, though, we fallback to safety and
* just perform RCU-delated freeing of ri.
+ * Admittedly, this is a rather simple use of seqcount, but it nicely
+ * abstracts away all the necessary memory barriers, so we use
+ * a well-supported kernel primitive here.
*/
if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) {
/* immediate reuse of ri without RCU GP is OK */
@@ -2016,12 +2019,20 @@ static void ri_timer(struct timer_list *timer)
/* RCU protects return_instance from freeing. */
guard(rcu)();
- write_seqcount_begin(&utask->ri_seqcount);
+ /*
+ * See free_ret_instance() for notes on seqcount use.
+ * We also employ raw API variants to avoid lockdep false-positive
+	 * warnings about enabled preemption. The timer can only be
+ * invoked once for a uprobe_task. Therefore there can only be one
+ * writer. The reader does not require an even sequence count to make
+ * progress, so it is OK to remain preemptible on PREEMPT_RT.
+ */
+ raw_write_seqcount_begin(&utask->ri_seqcount);
for_each_ret_instance_rcu(ri, utask->return_instances)
hprobe_expire(&ri->hprobe, false);
- write_seqcount_end(&utask->ri_seqcount);
+ raw_write_seqcount_end(&utask->ri_seqcount);
}
static struct uprobe_task *alloc_utask(void)
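
The seqcount use above boils down to a cheap single-writer flag with the right
barriers: the timer (writer) bumps the count around its traversal, and the free
path (reader) reuses memory immediately only if the count is even and unchanged.
A minimal C11 sketch of that try-begin check (an illustration of the idea, not
the kernel's seqcount implementation):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint seq;	/* even: writer idle, odd: writer active */

/* Writer side (single writer, e.g. a once-only timer callback). */
static void writer_begin(void)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_acquire);
}

static void writer_end(void)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);
}

/*
 * Reader side: @snap was read earlier. Succeeds only if no writer was
 * active then and none has run since - the condition under which the
 * free path above may reuse memory without waiting for an RCU grace
 * period.
 */
static bool try_begin(unsigned int snap)
{
	return !(snap & 1) &&
	       atomic_load_explicit(&seq, memory_order_acquire) == snap;
}

If try_begin() fails, the only safe fallback is deferred freeing, exactly as
free_ret_instance() falls back to RCU-delayed freeing above.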
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 517ee2590a29..30899a8cc52c 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -366,7 +366,7 @@ static const struct debug_obj_descr hrtimer_debug_descr;
static void *hrtimer_debug_hint(void *addr)
{
- return ((struct hrtimer *) addr)->function;
+ return ACCESS_PRIVATE((struct hrtimer *)addr, function);
}
/*
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index a47bcf71defc..9a3859443c04 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -509,6 +509,7 @@ void tick_resume(void)
#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
+static DEFINE_WAIT_OVERRIDE_MAP(tick_freeze_map, LD_WAIT_SLEEP);
static unsigned int tick_freeze_depth;
/**
@@ -528,9 +529,22 @@ void tick_freeze(void)
if (tick_freeze_depth == num_online_cpus()) {
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), true);
+ /*
+ * All other CPUs have their interrupts disabled and are
+ * suspended to idle. Other tasks have been frozen so there
+ * is no scheduling happening. This means that there is no
+ * concurrency in the system at this point. Therefore it is
+ * okay to acquire a sleeping lock on PREEMPT_RT, such as a
+ * spinlock, because the lock cannot be held by other CPUs
+ * or threads and acquiring it cannot block.
+ *
+ * Inform lockdep about the situation.
+ */
+ lock_map_acquire_try(&tick_freeze_map);
system_state = SYSTEM_SUSPEND;
sched_clock_suspend();
timekeeping_suspend();
+ lock_map_release(&tick_freeze_map);
} else {
tick_suspend_local();
}
@@ -552,8 +566,16 @@ void tick_unfreeze(void)
raw_spin_lock(&tick_freeze_lock);
if (tick_freeze_depth == num_online_cpus()) {
+ /*
+ * Similar to tick_freeze(). On resumption the first CPU may
+ * acquire uncontended sleeping locks while other CPUs block on
+ * tick_freeze_lock.
+ */
+ lock_map_acquire_try(&tick_freeze_map);
timekeeping_resume();
sched_clock_resume();
+ lock_map_release(&tick_freeze_map);
+
system_state = SYSTEM_RUNNING;
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), false);
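
DEFINE_WAIT_OVERRIDE_MAP creates an annotation-only pseudo-lock: acquiring it
changes what lockdep will tolerate inside the bracketed region without doing any
runtime locking. A rough userspace analogy under deliberately simplified,
assumed semantics (a boolean flag stands in for the lockdep machinery; none of
these names are the real API):

#include <assert.h>
#include <stdbool.h>

/* Stand-in for the wait-override map: purely a checker-visible flag. */
static bool sleep_override;

static void override_acquire(void) { sleep_override = true; }
static void override_release(void) { sleep_override = false; }

/* What a sleeping-lock implementation's debug hook might verify. */
static void assert_sleepable_here(void)
{
	assert(sleep_override);	/* otherwise: report a violation */
}

static void freeze_timekeeping(void)
{
	override_acquire();	/* all other CPUs quiesced: sleeping is safe */
	assert_sleepable_here();
	/* ... suspend clocks; may take sleeping locks on PREEMPT_RT ... */
	override_release();
}

int main(void)
{
	freeze_timekeeping();
	return 0;
}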
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index 33082c4e8154..95c6e3473a76 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -89,8 +89,11 @@ static bool delete_fprobe_node(struct fprobe_hlist_node *node)
{
lockdep_assert_held(&fprobe_mutex);
- WRITE_ONCE(node->fp, NULL);
- hlist_del_rcu(&node->hlist);
+ /* Avoid double deleting */
+ if (READ_ONCE(node->fp) != NULL) {
+ WRITE_ONCE(node->fp, NULL);
+ hlist_del_rcu(&node->hlist);
+ }
return !!find_first_fprobe_node(node->addr);
}
@@ -411,6 +414,102 @@ static void fprobe_graph_remove_ips(unsigned long *addrs, int num)
ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 1, 0);
}
+#ifdef CONFIG_MODULES
+
+#define FPROBE_IPS_BATCH_INIT 8
+/* instruction pointer address list */
+struct fprobe_addr_list {
+ int index;
+ int size;
+ unsigned long *addrs;
+};
+
+static int fprobe_addr_list_add(struct fprobe_addr_list *alist, unsigned long addr)
+{
+ unsigned long *addrs;
+
+ if (alist->index >= alist->size)
+ return -ENOMEM;
+
+ alist->addrs[alist->index++] = addr;
+ if (alist->index < alist->size)
+ return 0;
+
+ /* Expand the address list */
+ addrs = kcalloc(alist->size * 2, sizeof(*addrs), GFP_KERNEL);
+ if (!addrs)
+ return -ENOMEM;
+
+ memcpy(addrs, alist->addrs, alist->size * sizeof(*addrs));
+ alist->size *= 2;
+ kfree(alist->addrs);
+ alist->addrs = addrs;
+
+ return 0;
+}
+
+static void fprobe_remove_node_in_module(struct module *mod, struct hlist_head *head,
+ struct fprobe_addr_list *alist)
+{
+ struct fprobe_hlist_node *node;
+ int ret = 0;
+
+ hlist_for_each_entry_rcu(node, head, hlist) {
+ if (!within_module(node->addr, mod))
+ continue;
+ if (delete_fprobe_node(node))
+ continue;
+ /*
+	 * If we failed to update alist, just continue to update the hlist.
+	 * Therefore, at least the user handler will not be hit anymore.
+ */
+ if (!ret)
+ ret = fprobe_addr_list_add(alist, node->addr);
+ }
+}
+
+/* Handle module unloading to manage fprobe_ip_table. */
+static int fprobe_module_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct fprobe_addr_list alist = {.size = FPROBE_IPS_BATCH_INIT};
+ struct module *mod = data;
+ int i;
+
+ if (val != MODULE_STATE_GOING)
+ return NOTIFY_DONE;
+
+ alist.addrs = kcalloc(alist.size, sizeof(*alist.addrs), GFP_KERNEL);
+ /* If failed to alloc memory, we can not remove ips from hash. */
+ if (!alist.addrs)
+ return NOTIFY_DONE;
+
+ mutex_lock(&fprobe_mutex);
+ for (i = 0; i < FPROBE_IP_TABLE_SIZE; i++)
+ fprobe_remove_node_in_module(mod, &fprobe_ip_table[i], &alist);
+
+ if (alist.index < alist.size && alist.index > 0)
+ ftrace_set_filter_ips(&fprobe_graph_ops.ops,
+ alist.addrs, alist.index, 1, 0);
+ mutex_unlock(&fprobe_mutex);
+
+ kfree(alist.addrs);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block fprobe_module_nb = {
+ .notifier_call = fprobe_module_callback,
+ .priority = 0,
+};
+
+static int __init init_fprobe_module(void)
+{
+ return register_module_notifier(&fprobe_module_nb);
+}
+early_initcall(init_fprobe_module);
+#endif
+
static int symbols_cmp(const void *a, const void *b)
{
const char **str_a = (const char **) a;
@@ -445,6 +544,7 @@ struct filter_match_data {
size_t index;
size_t size;
unsigned long *addrs;
+ struct module **mods;
};
static int filter_match_callback(void *data, const char *name, unsigned long addr)
@@ -458,30 +558,47 @@ static int filter_match_callback(void *data, const char *name, unsigned long add
if (!ftrace_location(addr))
return 0;
- if (match->addrs)
- match->addrs[match->index] = addr;
+ if (match->addrs) {
+ struct module *mod = __module_text_address(addr);
+
+ if (mod && !try_module_get(mod))
+ return 0;
+ match->mods[match->index] = mod;
+ match->addrs[match->index] = addr;
+ }
match->index++;
return match->index == match->size;
}
/*
* Make IP list from the filter/no-filter glob patterns.
- * Return the number of matched symbols, or -ENOENT.
+ * Return the number of matched symbols, or a negative errno.
+ * If @addrs == NULL, this just counts the number of matched symbols. If @addrs
+ * is passed with an array, we must also pass an @mods array of the same size
+ * to increment the module refcount for each symbol.
+ * This means we also need to call `module_put` for each element of @mods after
+ * using the @addrs.
*/
-static int ip_list_from_filter(const char *filter, const char *notfilter,
- unsigned long *addrs, size_t size)
+static int get_ips_from_filter(const char *filter, const char *notfilter,
+ unsigned long *addrs, struct module **mods,
+ size_t size)
{
struct filter_match_data match = { .filter = filter, .notfilter = notfilter,
- .index = 0, .size = size, .addrs = addrs};
+ .index = 0, .size = size, .addrs = addrs, .mods = mods};
int ret;
+ if (addrs && !mods)
+ return -EINVAL;
+
ret = kallsyms_on_each_symbol(filter_match_callback, &match);
if (ret < 0)
return ret;
- ret = module_kallsyms_on_each_symbol(NULL, filter_match_callback, &match);
- if (ret < 0)
- return ret;
+ if (IS_ENABLED(CONFIG_MODULES)) {
+ ret = module_kallsyms_on_each_symbol(NULL, filter_match_callback, &match);
+ if (ret < 0)
+ return ret;
+ }
return match.index ?: -ENOENT;
}
@@ -543,24 +660,35 @@ static int fprobe_init(struct fprobe *fp, unsigned long *addrs, int num)
*/
int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
{
- unsigned long *addrs;
- int ret;
+ unsigned long *addrs __free(kfree) = NULL;
+ struct module **mods __free(kfree) = NULL;
+ int ret, num;
if (!fp || !filter)
return -EINVAL;
- ret = ip_list_from_filter(filter, notfilter, NULL, FPROBE_IPS_MAX);
- if (ret < 0)
- return ret;
+ num = get_ips_from_filter(filter, notfilter, NULL, NULL, FPROBE_IPS_MAX);
+ if (num < 0)
+ return num;
- addrs = kcalloc(ret, sizeof(unsigned long), GFP_KERNEL);
+ addrs = kcalloc(num, sizeof(*addrs), GFP_KERNEL);
if (!addrs)
return -ENOMEM;
- ret = ip_list_from_filter(filter, notfilter, addrs, ret);
- if (ret > 0)
- ret = register_fprobe_ips(fp, addrs, ret);
- kfree(addrs);
+ mods = kcalloc(num, sizeof(*mods), GFP_KERNEL);
+ if (!mods)
+ return -ENOMEM;
+
+ ret = get_ips_from_filter(filter, notfilter, addrs, mods, num);
+ if (ret < 0)
+ return ret;
+
+ ret = register_fprobe_ips(fp, addrs, ret);
+
+ for (int i = 0; i < num; i++) {
+ if (mods[i])
+ module_put(mods[i]);
+ }
return ret;
}
EXPORT_SYMBOL_GPL(register_fprobe);
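
fprobe_addr_list_add() above grows its array eagerly, right after the slot that
fills it is written, so the bounds check at the top can only trip if a previous
doubling failed. The same grow-on-fill idiom in plain C (illustrative names,
calloc/free standing in for the kernel allocators):

#include <stdlib.h>
#include <string.h>

struct addr_list {
	int index, size;
	unsigned long *addrs;
};

static int addr_list_add(struct addr_list *l, unsigned long addr)
{
	unsigned long *bigger;

	if (l->index >= l->size)
		return -1;	/* only possible if an earlier doubling failed */

	l->addrs[l->index++] = addr;
	if (l->index < l->size)
		return 0;

	/* Just became full: double now so the next add finds room. */
	bigger = calloc(l->size * 2, sizeof(*bigger));
	if (!bigger)
		return -1;	/* the add itself still succeeded */
	memcpy(bigger, l->addrs, l->size * sizeof(*bigger));
	l->size *= 2;
	free(l->addrs);
	l->addrs = bigger;
	return 0;
}

int main(void)
{
	struct addr_list l = { .size = 2 };

	l.addrs = calloc(l.size, sizeof(*l.addrs));
	if (!l.addrs)
		return 1;
	for (unsigned long a = 0; a < 5; a++)
		addr_list_add(&l, a);	/* grows from 2 to 4 to 8 slots */
	free(l.addrs);
	return 0;
}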
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1a48aedb5255..a8a02868b435 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3256,6 +3256,31 @@ static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash,
}
/*
+ * Remove functions from @hash that are in @notrace_hash
+ */
+static void remove_hash(struct ftrace_hash *hash, struct ftrace_hash *notrace_hash)
+{
+ struct ftrace_func_entry *entry;
+ struct hlist_node *tmp;
+ int size;
+ int i;
+
+ /* If the notrace hash is empty, there's nothing to do */
+ if (ftrace_hash_empty(notrace_hash))
+ return;
+
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
+ if (!__ftrace_lookup_ip(notrace_hash, entry->ip))
+ continue;
+ remove_hash_entry(hash, entry);
+ kfree(entry);
+ }
+ }
+}
+
+/*
* Add to @hash only those that are in both @new_hash1 and @new_hash2
*
 * The notrace_hash update uses just the intersect_hash() function
@@ -3295,67 +3320,6 @@ static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_has
return 0;
}
-/* Return a new hash that has a union of all @ops->filter_hash entries */
-static struct ftrace_hash *append_hashes(struct ftrace_ops *ops)
-{
- struct ftrace_hash *new_hash = NULL;
- struct ftrace_ops *subops;
- int size_bits;
- int ret;
-
- if (ops->func_hash->filter_hash)
- size_bits = ops->func_hash->filter_hash->size_bits;
- else
- size_bits = FTRACE_HASH_DEFAULT_BITS;
-
- list_for_each_entry(subops, &ops->subop_list, list) {
- ret = append_hash(&new_hash, subops->func_hash->filter_hash, size_bits);
- if (ret < 0) {
- free_ftrace_hash(new_hash);
- return NULL;
- }
- /* Nothing more to do if new_hash is empty */
- if (ftrace_hash_empty(new_hash))
- break;
- }
- /* Can't return NULL as that means this failed */
- return new_hash ? : EMPTY_HASH;
-}
-
-/* Make @ops trace evenything except what all its subops do not trace */
-static struct ftrace_hash *intersect_hashes(struct ftrace_ops *ops)
-{
- struct ftrace_hash *new_hash = NULL;
- struct ftrace_ops *subops;
- int size_bits;
- int ret;
-
- list_for_each_entry(subops, &ops->subop_list, list) {
- struct ftrace_hash *next_hash;
-
- if (!new_hash) {
- size_bits = subops->func_hash->notrace_hash->size_bits;
- new_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->notrace_hash);
- if (!new_hash)
- return NULL;
- continue;
- }
- size_bits = new_hash->size_bits;
- next_hash = new_hash;
- new_hash = alloc_ftrace_hash(size_bits);
- ret = intersect_hash(&new_hash, next_hash, subops->func_hash->notrace_hash);
- free_ftrace_hash(next_hash);
- if (ret < 0) {
- free_ftrace_hash(new_hash);
- return NULL;
- }
- /* Nothing more to do if new_hash is empty */
- if (ftrace_hash_empty(new_hash))
- break;
- }
- return new_hash;
-}
-
static bool ops_equal(struct ftrace_hash *A, struct ftrace_hash *B)
{
struct ftrace_func_entry *entry;
@@ -3427,6 +3391,93 @@ static int ftrace_update_ops(struct ftrace_ops *ops, struct ftrace_hash *filter_
return 0;
}
+static int add_first_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash,
+ struct ftrace_ops_hash *func_hash)
+{
+ /* If the filter hash is not empty, simply remove the nohash from it */
+ if (!ftrace_hash_empty(func_hash->filter_hash)) {
+ *filter_hash = copy_hash(func_hash->filter_hash);
+ if (!*filter_hash)
+ return -ENOMEM;
+ remove_hash(*filter_hash, func_hash->notrace_hash);
+ *notrace_hash = EMPTY_HASH;
+
+ } else {
+ *notrace_hash = copy_hash(func_hash->notrace_hash);
+ if (!*notrace_hash)
+ return -ENOMEM;
+ *filter_hash = EMPTY_HASH;
+ }
+ return 0;
+}
+
+static int add_next_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash,
+ struct ftrace_ops_hash *ops_hash, struct ftrace_ops_hash *subops_hash)
+{
+ int size_bits;
+ int ret;
+
+	/* If the subops traces all functions, so must the main ops */
+ if (ftrace_hash_empty(ops_hash->filter_hash) ||
+ ftrace_hash_empty(subops_hash->filter_hash)) {
+ *filter_hash = EMPTY_HASH;
+ } else {
+ /*
+ * The main ops filter hash is not empty, so its
+ * notrace_hash had better be, as the notrace hash
+ * is only used for empty main filter hashes.
+ */
+ WARN_ON_ONCE(!ftrace_hash_empty(ops_hash->notrace_hash));
+
+ size_bits = max(ops_hash->filter_hash->size_bits,
+ subops_hash->filter_hash->size_bits);
+
+ /* Copy the subops hash */
+ *filter_hash = alloc_and_copy_ftrace_hash(size_bits, subops_hash->filter_hash);
+ if (!filter_hash)
+ return -ENOMEM;
+ /* Remove any notrace functions from the copy */
+ remove_hash(*filter_hash, subops_hash->notrace_hash);
+
+ ret = append_hash(filter_hash, ops_hash->filter_hash,
+ size_bits);
+ if (ret < 0) {
+ free_ftrace_hash(*filter_hash);
+ return ret;
+ }
+ }
+
+ /*
+ * Only process notrace hashes if the main filter hash is empty
+ * (tracing all functions), otherwise the filter hash will just
+ * remove the notrace hash functions, and the notrace hash is
+ * not needed.
+ */
+ if (ftrace_hash_empty(*filter_hash)) {
+ /*
+ * Intersect the notrace functions. That is, if two
+ * subops are not tracing a set of functions, the
+ * main ops will only not trace the functions that are
+ * in both subops, but has to trace the functions that
+ * are only notrace in one of the subops, for the other
+ * subops to be able to trace them.
+ */
+ size_bits = max(ops_hash->notrace_hash->size_bits,
+ subops_hash->notrace_hash->size_bits);
+ *notrace_hash = alloc_ftrace_hash(size_bits);
+ if (!*notrace_hash)
+ return -ENOMEM;
+
+ ret = intersect_hash(notrace_hash, ops_hash->notrace_hash,
+ subops_hash->notrace_hash);
+ if (ret < 0) {
+ free_ftrace_hash(*notrace_hash);
+ return ret;
+ }
+ }
+ return 0;
+}
+
/**
* ftrace_startup_subops - enable tracing for subops of an ops
* @ops: Manager ops (used to pick all the functions of its subops)
@@ -3443,7 +3494,6 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
struct ftrace_hash *notrace_hash;
struct ftrace_hash *save_filter_hash;
struct ftrace_hash *save_notrace_hash;
- int size_bits;
int ret;
if (unlikely(ftrace_disabled))
@@ -3467,14 +3517,14 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
/* For the first subops to ops just enable it normally */
if (list_empty(&ops->subop_list)) {
- /* Just use the subops hashes */
- filter_hash = copy_hash(subops->func_hash->filter_hash);
- notrace_hash = copy_hash(subops->func_hash->notrace_hash);
- if (!filter_hash || !notrace_hash) {
- free_ftrace_hash(filter_hash);
- free_ftrace_hash(notrace_hash);
- return -ENOMEM;
- }
+
+		/* The ops was empty, so it should have empty hashes */
+ WARN_ON_ONCE(!ftrace_hash_empty(ops->func_hash->filter_hash));
+ WARN_ON_ONCE(!ftrace_hash_empty(ops->func_hash->notrace_hash));
+
+ ret = add_first_hash(&filter_hash, &notrace_hash, subops->func_hash);
+ if (ret < 0)
+ return ret;
save_filter_hash = ops->func_hash->filter_hash;
save_notrace_hash = ops->func_hash->notrace_hash;
@@ -3500,48 +3550,16 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
/*
* Here there's already something attached. Here are the rules:
- * o If either filter_hash is empty then the final stays empty
- * o Otherwise, the final is a superset of both hashes
- * o If either notrace_hash is empty then the final stays empty
- * o Otherwise, the final is an intersection between the hashes
+ * If the new subops and main ops filter hashes are not empty:
+ * o Make a copy of the subops filter hash
+ * o Remove all functions in the nohash from it.
+ * o Add in the main hash filter functions
+ * o Remove any of these functions from the main notrace hash
*/
- if (ftrace_hash_empty(ops->func_hash->filter_hash) ||
- ftrace_hash_empty(subops->func_hash->filter_hash)) {
- filter_hash = EMPTY_HASH;
- } else {
- size_bits = max(ops->func_hash->filter_hash->size_bits,
- subops->func_hash->filter_hash->size_bits);
- filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash);
- if (!filter_hash)
- return -ENOMEM;
- ret = append_hash(&filter_hash, subops->func_hash->filter_hash,
- size_bits);
- if (ret < 0) {
- free_ftrace_hash(filter_hash);
- return ret;
- }
- }
-
- if (ftrace_hash_empty(ops->func_hash->notrace_hash) ||
- ftrace_hash_empty(subops->func_hash->notrace_hash)) {
- notrace_hash = EMPTY_HASH;
- } else {
- size_bits = max(ops->func_hash->filter_hash->size_bits,
- subops->func_hash->filter_hash->size_bits);
- notrace_hash = alloc_ftrace_hash(size_bits);
- if (!notrace_hash) {
- free_ftrace_hash(filter_hash);
- return -ENOMEM;
- }
- ret = intersect_hash(&notrace_hash, ops->func_hash->filter_hash,
- subops->func_hash->filter_hash);
- if (ret < 0) {
- free_ftrace_hash(filter_hash);
- free_ftrace_hash(notrace_hash);
- return ret;
- }
- }
+ ret = add_next_hash(&filter_hash, &notrace_hash, ops->func_hash, subops->func_hash);
+ if (ret < 0)
+ return ret;
list_add(&subops->list, &ops->subop_list);
@@ -3557,6 +3575,42 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
return ret;
}
+static int rebuild_hashes(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash,
+ struct ftrace_ops *ops)
+{
+ struct ftrace_ops_hash temp_hash;
+ struct ftrace_ops *subops;
+ bool first = true;
+ int ret;
+
+ temp_hash.filter_hash = EMPTY_HASH;
+ temp_hash.notrace_hash = EMPTY_HASH;
+
+ list_for_each_entry(subops, &ops->subop_list, list) {
+ *filter_hash = EMPTY_HASH;
+ *notrace_hash = EMPTY_HASH;
+
+ if (first) {
+ ret = add_first_hash(filter_hash, notrace_hash, subops->func_hash);
+ if (ret < 0)
+ return ret;
+ first = false;
+ } else {
+ ret = add_next_hash(filter_hash, notrace_hash,
+ &temp_hash, subops->func_hash);
+ if (ret < 0) {
+ free_ftrace_hash(temp_hash.filter_hash);
+ free_ftrace_hash(temp_hash.notrace_hash);
+ return ret;
+ }
+ }
+
+ temp_hash.filter_hash = *filter_hash;
+ temp_hash.notrace_hash = *notrace_hash;
+ }
+ return 0;
+}
+
/**
* ftrace_shutdown_subops - Remove a subops from a manager ops
* @ops: A manager ops to remove @subops from
@@ -3605,14 +3659,9 @@ int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, in
}
/* Rebuild the hashes without subops */
- filter_hash = append_hashes(ops);
- notrace_hash = intersect_hashes(ops);
- if (!filter_hash || !notrace_hash) {
- free_ftrace_hash(filter_hash);
- free_ftrace_hash(notrace_hash);
- list_add(&subops->list, &ops->subop_list);
- return -ENOMEM;
- }
+ ret = rebuild_hashes(&filter_hash, &notrace_hash, ops);
+ if (ret < 0)
+ return ret;
ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
if (ret < 0) {
@@ -3628,11 +3677,11 @@ int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, in
static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
struct ftrace_hash **orig_subhash,
- struct ftrace_hash *hash,
- int enable)
+ struct ftrace_hash *hash)
{
struct ftrace_ops *ops = subops->managed;
- struct ftrace_hash **orig_hash;
+ struct ftrace_hash *notrace_hash;
+ struct ftrace_hash *filter_hash;
struct ftrace_hash *save_hash;
struct ftrace_hash *new_hash;
int ret;
@@ -3649,24 +3698,15 @@ static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
return -ENOMEM;
}
- /* Create a new_hash to hold the ops new functions */
- if (enable) {
- orig_hash = &ops->func_hash->filter_hash;
- new_hash = append_hashes(ops);
- } else {
- orig_hash = &ops->func_hash->notrace_hash;
- new_hash = intersect_hashes(ops);
- }
-
- /* Move the hash over to the new hash */
- ret = __ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable);
-
- free_ftrace_hash(new_hash);
+ ret = rebuild_hashes(&filter_hash, &notrace_hash, ops);
+ if (!ret)
+ ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
if (ret) {
/* Put back the original hash */
- free_ftrace_hash_rcu(*orig_subhash);
+ new_hash = *orig_subhash;
*orig_subhash = save_hash;
+ free_ftrace_hash_rcu(new_hash);
} else {
free_ftrace_hash_rcu(save_hash);
}
@@ -4890,7 +4930,7 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
int enable)
{
if (ops->flags & FTRACE_OPS_FL_SUBOP)
- return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);
+ return ftrace_hash_move_and_update_subops(ops, orig_hash, hash);
/*
* If this ops is not enabled, it could be sharing its filters
@@ -4909,7 +4949,7 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
list_for_each_entry(subops, &op->subop_list, list) {
if ((subops->flags & FTRACE_OPS_FL_ENABLED) &&
subops->func_hash == ops->func_hash) {
- return ftrace_hash_move_and_update_subops(subops, orig_hash, hash, enable);
+ return ftrace_hash_move_and_update_subops(subops, orig_hash, hash);
}
}
} while_for_each_ftrace_op(op);
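
The rewritten subops combination is set algebra over two kinds of rules: a
non-empty filter hash means "trace only these", an empty one means "trace
everything except the notrace hash". A sketch of the joint rule over bitmask
sets (illustrative only, not the kernel hash API):

#include <stdint.h>

typedef uint64_t set_t;	/* one bit per function; empty filter == trace all */

struct rules { set_t filter, notrace; };

static struct rules combine(const struct rules *sub, int n)
{
	struct rules out = { 0, 0 };
	int trace_all = 0;

	for (int i = 0; i < n; i++) {
		if (!sub[i].filter)
			trace_all = 1;	/* one "trace all" subops wins */
		else
			out.filter |= sub[i].filter & ~sub[i].notrace;
	}
	if (!trace_all)
		return out;	/* a filter is set: no notrace hash needed */

	/*
	 * All-tracing manager: it may only skip functions that every
	 * subops skips, i.e. the intersection of the notrace sets. A
	 * subops with a filter contributes the empty set, forcing the
	 * joint notrace empty - "if either hash is empty the final
	 * stays empty".
	 */
	out.filter = 0;
	out.notrace = ~(set_t)0;
	for (int i = 0; i < n; i++)
		out.notrace &= sub[i].filter ? 0 : sub[i].notrace;
	return out;
}

int main(void)
{
	struct rules sub[2] = {
		{ .filter = 0x0f, .notrace = 0x01 },	/* trace 1-3 only */
		{ .filter = 0,    .notrace = 0x30 },	/* all but 4-5 */
	};
	struct rules joint = combine(sub, 2);

	/* joint.filter == 0, joint.notrace == 0: the manager traces all */
	(void)joint;
	return 0;
}

Over-tracing at the manager level is safe because each subops still applies its
own filter at call time; the rebuild_hashes() loop above applies this pairwise.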
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index 968c5c3b0246..e4077500a91d 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -225,7 +225,12 @@ bool rv_is_nested_monitor(struct rv_monitor_def *mdef)
*/
bool rv_is_container_monitor(struct rv_monitor_def *mdef)
{
- struct rv_monitor_def *next = list_next_entry(mdef, list);
+ struct rv_monitor_def *next;
+
+ if (list_is_last(&mdef->list, &rv_monitors_list))
+ return false;
+
+ next = list_next_entry(mdef, list);
return next->parent == mdef->monitor || !mdef->monitor->enable;
}
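
list_next_entry() on the last node of a circular kernel list hands back the list
head reinterpreted as an entry, which is exactly the out-of-bounds access the
list_is_last() guard above prevents. A compact illustration with a hand-rolled
intrusive list (hypothetical monitor type, not the RV code):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct monitor { int id; struct list_head list; };

/* Return the next monitor, or NULL when @m is the last entry. */
static struct monitor *next_monitor(struct monitor *m, struct list_head *head)
{
	if (m->list.next == head)	/* list_is_last() */
		return NULL;		/* head is not inside a monitor! */
	return container_of(m->list.next, struct monitor, list);
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct monitor a = { .id = 1 };

	/* Insert a as the sole element of the circular list. */
	a.list.next = &head;
	a.list.prev = &head;
	head.next = &a.list;
	head.prev = &a.list;

	printf("%p\n", (void *)next_monitor(&a, &head));	/* (nil) */
	return 0;
}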
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b581e388a9d9..8ddf6b17215c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9806,6 +9806,7 @@ static int instance_mkdir(const char *name)
return ret;
}
+#ifdef CONFIG_MMU
static u64 map_pages(unsigned long start, unsigned long size)
{
unsigned long vmap_start, vmap_end;
@@ -9828,6 +9829,12 @@ static u64 map_pages(unsigned long start, unsigned long size)
return (u64)vmap_start;
}
+#else
+static inline u64 map_pages(unsigned long start, unsigned long size)
+{
+ return 0;
+}
+#endif
/**
* trace_array_get_by_name - Create/Lookup a trace array, given its name.
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 969f48742d72..33cfbd4ed76d 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -370,7 +370,6 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
union trace_synth_field *data = &entry->fields[n_u64];
trace_seq_printf(s, print_fmt, se->fields[i]->name,
- STR_VAR_LEN_MAX,
(char *)entry + data->as_dynamic.offset,
i == se->n_fields - 1 ? "" : " ");
n_u64++;
diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
index 5d7ca80173ea..b40fa59159ac 100644
--- a/kernel/trace/trace_fprobe.c
+++ b/kernel/trace/trace_fprobe.c
@@ -919,9 +919,15 @@ static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mo
struct __find_tracepoint_cb_data *data = priv;
if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
- data->tpoint = tp;
- if (!data->mod)
+ /* If module is not specified, try getting module refcount. */
+		/* If no module is specified, try to get the module refcount. */
+		if (!data->mod && mod) {
+			/* If we fail to get the refcount, ignore this tracepoint. */
+ return;
+
data->mod = mod;
+ }
+ data->tpoint = tp;
}
}
@@ -933,7 +939,11 @@ static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
data->tpoint = tp;
}
-/* Find a tracepoint from kernel and module. */
+/*
+ * Find a tracepoint in the kernel and modules. If the tracepoint is in a module,
+ * the module's refcount is incremented and returned as *@tp_mod. Thus, if it is
+ * not NULL, the caller must call module_put(*tp_mod) after using the tracepoint.
+ */
static struct tracepoint *find_tracepoint(const char *tp_name,
struct module **tp_mod)
{
@@ -962,7 +972,10 @@ static void reenable_trace_fprobe(struct trace_fprobe *tf)
}
}
-/* Find a tracepoint from specified module. */
+/*
+ * Find a tracepoint in the specified module. In this case, this does not get the
+ * module's refcount. The caller must ensure the module is not freed.
+ */
static struct tracepoint *find_tracepoint_in_module(struct module *mod,
const char *tp_name)
{
@@ -1169,11 +1182,6 @@ static int trace_fprobe_create_internal(int argc, const char *argv[],
if (is_tracepoint) {
ctx->flags |= TPARG_FL_TPOINT;
tpoint = find_tracepoint(symbol, &tp_mod);
- /* lock module until register this tprobe. */
- if (tp_mod && !try_module_get(tp_mod)) {
- tpoint = NULL;
- tp_mod = NULL;
- }
if (tpoint) {
ctx->funcname = kallsyms_lookup(
(unsigned long)tpoint->probestub,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 2f077d4158e5..0c357a89c58e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -880,8 +880,6 @@ static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entr
if (print_retval || print_retaddr)
trace_seq_puts(s, " /*");
- else
- trace_seq_putc(s, '\n');
} else {
print_retaddr = false;
trace_seq_printf(s, "} /* %ps", func);
@@ -899,7 +897,7 @@ static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entr
}
if (!entry || print_retval || print_retaddr)
- trace_seq_puts(s, " */\n");
+ trace_seq_puts(s, " */");
}
#else
@@ -975,7 +973,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
} else
trace_seq_puts(s, "();");
}
- trace_seq_printf(s, "\n");
+ trace_seq_putc(s, '\n');
print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
cpu, iter->ent->pid, flags);
@@ -1313,10 +1311,11 @@ print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
* that if the funcgraph-tail option is enabled.
*/
if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
- trace_seq_puts(s, "}\n");
+ trace_seq_puts(s, "}");
else
- trace_seq_printf(s, "} /* %ps */\n", (void *)func);
+ trace_seq_printf(s, "} /* %ps */", (void *)func);
}
+ trace_seq_putc(s, '\n');
/* Overrun */
if (flags & TRACE_GRAPH_PRINT_OVERRUN)
diff --git a/lib/Kconfig b/lib/Kconfig
index 61cce0686b53..6c1b8f184267 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -139,27 +139,22 @@ config TRACE_MMIO_ACCESS
source "lib/crypto/Kconfig"
config CRC_CCITT
- tristate "CRC-CCITT functions"
+ tristate
help
- This option is provided for the case where no in-kernel-tree
- modules require CRC-CCITT functions, but a module built outside
- the kernel tree does. Such modules that use library CRC-CCITT
- functions require M here.
+ The CRC-CCITT library functions. Select this if your module uses any
+ of the functions from <linux/crc-ccitt.h>.
config CRC16
- tristate "CRC16 functions"
+ tristate
help
- This option is provided for the case where no in-kernel-tree
- modules require CRC16 functions, but a module built outside
- the kernel tree does. Such modules that use library CRC16
- functions require M here.
+ The CRC16 library functions. Select this if your module uses any of
+ the functions from <linux/crc16.h>.
config CRC_T10DIF
- tristate "CRC calculation for the T10 Data Integrity Field"
+ tristate
help
- This option is only needed if a module that's not in the
- kernel tree needs to calculate CRC checks for use with the
- SCSI data integrity subsystem.
+ The CRC-T10DIF library functions. Select this if your module uses
+ any of the functions from <linux/crc-t10dif.h>.
config ARCH_HAS_CRC_T10DIF
bool
@@ -169,22 +164,17 @@ config CRC_T10DIF_ARCH
default CRC_T10DIF if ARCH_HAS_CRC_T10DIF && CRC_OPTIMIZATIONS
config CRC_ITU_T
- tristate "CRC ITU-T V.41 functions"
+ tristate
help
- This option is provided for the case where no in-kernel-tree
- modules require CRC ITU-T V.41 functions, but a module built outside
- the kernel tree does. Such modules that use library CRC ITU-T V.41
- functions require M here.
+ The CRC-ITU-T library functions. Select this if your module uses
+ any of the functions from <linux/crc-itu-t.h>.
config CRC32
- tristate "CRC32/CRC32c functions"
- default y
+ tristate
select BITREVERSE
help
- This option is provided for the case where no in-kernel-tree
- modules require CRC32/CRC32c functions, but a module built outside
- the kernel tree does. Such modules that use library CRC32/CRC32c
- functions require M here.
+ The CRC32 library functions. Select this if your module uses any of
+ the functions from <linux/crc32.h> or <linux/crc32c.h>.
config ARCH_HAS_CRC32
bool
@@ -195,6 +185,9 @@ config CRC32_ARCH
config CRC64
tristate
+ help
+ The CRC64 library functions. Select this if your module uses any of
+ the functions from <linux/crc64.h>.
config ARCH_HAS_CRC64
bool
@@ -205,19 +198,21 @@ config CRC64_ARCH
config CRC4
tristate
+ help
+ The CRC4 library functions. Select this if your module uses any of
+ the functions from <linux/crc4.h>.
config CRC7
tristate
-
-config LIBCRC32C
- tristate
- select CRC32
help
- This option just selects CRC32 and is provided for compatibility
- purposes until the users are updated to select CRC32 directly.
+ The CRC7 library functions. Select this if your module uses any of
+ the functions from <linux/crc7.h>.
config CRC8
tristate
+ help
+ The CRC8 library functions. Select this if your module uses any of
+ the functions from <linux/crc8.h>.
config CRC_OPTIMIZATIONS
bool "Enable optimized CRC implementations" if EXPERT
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 860a0786bc1e..20b316207f9a 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -9,7 +9,7 @@
config BATMAN_ADV
tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
- select LIBCRC32C
+ select CRC32
help
B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
a routing protocol for multi-hop ad-hoc mesh networks. The
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index c5c4eef3a9ff..0aa21fcbf6ec 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -2,7 +2,7 @@
config CEPH_LIB
tristate "Ceph core library"
depends on INET
- select LIBCRC32C
+ select CRC32
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_GCM
diff --git a/net/core/dev.c b/net/core/dev.c
index 0608605cfc24..75e104322ad5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1518,15 +1518,7 @@ void netdev_features_change(struct net_device *dev)
}
EXPORT_SYMBOL(netdev_features_change);
-/**
- * netdev_state_change - device changes state
- * @dev: device to cause notification
- *
- * Called to indicate a device has changed state. This function calls
- * the notifier chains for netdev_chain and sends a NEWLINK message
- * to the routing socket.
- */
-void netdev_state_change(struct net_device *dev)
+void netif_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
struct netdev_notifier_change_info change_info = {
@@ -1538,7 +1530,6 @@ void netdev_state_change(struct net_device *dev)
rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
}
}
-EXPORT_SYMBOL(netdev_state_change);
/**
* __netdev_notify_peers - notify network peers about existence of @dev,
diff --git a/net/core/dev_api.c b/net/core/dev_api.c
index 90bafb0b1b8c..90898cd540ce 100644
--- a/net/core/dev_api.c
+++ b/net/core/dev_api.c
@@ -327,3 +327,19 @@ int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
return ret;
}
EXPORT_SYMBOL_GPL(dev_xdp_propagate);
+
+/**
+ * netdev_state_change() - device changes state
+ * @dev: device to cause notification
+ *
+ * Called to indicate a device has changed state. This function calls
+ * the notifier chains for netdev_chain and sends a NEWLINK message
+ * to the routing socket.
+ */
+void netdev_state_change(struct net_device *dev)
+{
+ netdev_lock_ops(dev);
+ netif_state_change(dev);
+ netdev_unlock_ops(dev);
+}
+EXPORT_SYMBOL(netdev_state_change);
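netdev_state_change() is rebuilt here as a thin wrapper that takes the instance lock around the new netif_state_change(). A self-contained pthread sketch of the locked-wrapper/unlocked-worker split (names are illustrative, not the netdev API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;

/* Internal variant: the caller already holds ops_lock
 * (cf. netif_state_change()).
 */
static void netif_change(int *state)
{
	(*state)++;
}

/* Public variant: takes and releases the lock around the internal one
 * (cf. netdev_state_change() in dev_api.c).
 */
static void netdev_change(int *state)
{
	pthread_mutex_lock(&ops_lock);
	netif_change(state);
	pthread_mutex_unlock(&ops_lock);
}

int main(void)
{
	int state = 0;

	netdev_change(&state);
	printf("state=%d\n", state);
	return 0;
}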
diff --git a/net/core/filter.c b/net/core/filter.c
index bc6828761a47..79cab4d78dc3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -218,24 +218,36 @@ BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
return 0;
}
+static int bpf_skb_load_helper_convert_offset(const struct sk_buff *skb, int offset)
+{
+ if (likely(offset >= 0))
+ return offset;
+
+ if (offset >= SKF_NET_OFF)
+ return offset - SKF_NET_OFF + skb_network_offset(skb);
+
+ if (offset >= SKF_LL_OFF && skb_mac_header_was_set(skb))
+ return offset - SKF_LL_OFF + skb_mac_offset(skb);
+
+ return INT_MIN;
+}
+
BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{
- u8 tmp, *ptr;
+ u8 tmp;
const int len = sizeof(tmp);
- if (offset >= 0) {
- if (headlen - offset >= len)
- return *(u8 *)(data + offset);
- if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
- return tmp;
- } else {
- ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
- if (likely(ptr))
- return *(u8 *)ptr;
- }
+ offset = bpf_skb_load_helper_convert_offset(skb, offset);
+ if (offset == INT_MIN)
+ return -EFAULT;
- return -EFAULT;
+ if (headlen - offset >= len)
+ return *(u8 *)(data + offset);
+ if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
+ return tmp;
+ else
+ return -EFAULT;
}
BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
@@ -248,21 +260,19 @@ BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{
- __be16 tmp, *ptr;
+ __be16 tmp;
const int len = sizeof(tmp);
- if (offset >= 0) {
- if (headlen - offset >= len)
- return get_unaligned_be16(data + offset);
- if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
- return be16_to_cpu(tmp);
- } else {
- ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
- if (likely(ptr))
- return get_unaligned_be16(ptr);
- }
+ offset = bpf_skb_load_helper_convert_offset(skb, offset);
+ if (offset == INT_MIN)
+ return -EFAULT;
- return -EFAULT;
+ if (headlen - offset >= len)
+ return get_unaligned_be16(data + offset);
+ if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
+ return be16_to_cpu(tmp);
+ else
+ return -EFAULT;
}
BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
@@ -275,21 +285,19 @@ BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{
- __be32 tmp, *ptr;
+ __be32 tmp;
const int len = sizeof(tmp);
- if (likely(offset >= 0)) {
- if (headlen - offset >= len)
- return get_unaligned_be32(data + offset);
- if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
- return be32_to_cpu(tmp);
- } else {
- ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
- if (likely(ptr))
- return get_unaligned_be32(ptr);
- }
+ offset = bpf_skb_load_helper_convert_offset(skb, offset);
+ if (offset == INT_MIN)
+ return -EFAULT;
- return -EFAULT;
+ if (headlen - offset >= len)
+ return get_unaligned_be32(data + offset);
+ if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
+ return be32_to_cpu(tmp);
+ else
+ return -EFAULT;
}
BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
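The three load helpers now share bpf_skb_load_helper_convert_offset(), which folds the SKF_NET_OFF/SKF_LL_OFF magic offsets into a plain offset and uses INT_MIN as the invalid sentinel. A compilable sketch of that conversion (net_off/mac_off are fixed stand-ins for skb_network_offset()/skb_mac_offset()):

#include <limits.h>
#include <stdio.h>

#define SKF_NET_OFF	(-0x100000)	/* mirror the uapi constants */
#define SKF_LL_OFF	(-0x200000)

static int net_off = 14, mac_off = 0;	/* illustrative header offsets */

static int convert_offset(int offset)
{
	if (offset >= 0)
		return offset;
	if (offset >= SKF_NET_OFF)
		return offset - SKF_NET_OFF + net_off;
	if (offset >= SKF_LL_OFF)
		return offset - SKF_LL_OFF + mac_off;
	return INT_MIN;			/* sentinel: caller returns -EFAULT */
}

int main(void)
{
	printf("%d %d %d\n",
	       convert_offset(4),		/* plain offset: 4 */
	       convert_offset(SKF_NET_OFF + 2),	/* 2 bytes into L3: 16 */
	       convert_offset(INT_MIN));	/* invalid */
	return 0;
}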
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index cb04ef2b9807..864f3bbc3a4c 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -183,7 +183,7 @@ static void linkwatch_do_dev(struct net_device *dev)
else
dev_deactivate(dev);
- netdev_state_change(dev);
+ netif_state_change(dev);
}
/* Note: our callers are responsible for calling netdev_tracker_free().
* This is the reason we use __dev_put() instead of dev_put().
@@ -240,7 +240,9 @@ static void __linkwatch_run_queue(int urgent_only)
*/
netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
spin_unlock_irq(&lweventlist_lock);
+ netdev_lock_ops(dev);
linkwatch_do_dev(dev);
+ netdev_unlock_ops(dev);
do_dev--;
spin_lock_irq(&lweventlist_lock);
}
@@ -253,25 +255,41 @@ static void __linkwatch_run_queue(int urgent_only)
spin_unlock_irq(&lweventlist_lock);
}
-void linkwatch_sync_dev(struct net_device *dev)
+static bool linkwatch_clean_dev(struct net_device *dev)
{
unsigned long flags;
- int clean = 0;
+ bool clean = false;
spin_lock_irqsave(&lweventlist_lock, flags);
if (!list_empty(&dev->link_watch_list)) {
list_del_init(&dev->link_watch_list);
- clean = 1;
+ clean = true;
/* We must release netdev tracker under
* the spinlock protection.
*/
netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
}
spin_unlock_irqrestore(&lweventlist_lock, flags);
- if (clean)
+
+ return clean;
+}
+
+void __linkwatch_sync_dev(struct net_device *dev)
+{
+ netdev_ops_assert_locked(dev);
+
+ if (linkwatch_clean_dev(dev))
linkwatch_do_dev(dev);
}
+void linkwatch_sync_dev(struct net_device *dev)
+{
+ if (linkwatch_clean_dev(dev)) {
+ netdev_lock_ops(dev);
+ linkwatch_do_dev(dev);
+ netdev_unlock_ops(dev);
+ }
+}
/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
diff --git a/net/core/lock_debug.c b/net/core/lock_debug.c
index b7f22dc92a6f..941e26c1343d 100644
--- a/net/core/lock_debug.c
+++ b/net/core/lock_debug.c
@@ -20,11 +20,11 @@ int netdev_debug_event(struct notifier_block *nb, unsigned long event,
switch (cmd) {
case NETDEV_REGISTER:
case NETDEV_UP:
+ case NETDEV_CHANGE:
netdev_ops_assert_locked(dev);
fallthrough;
case NETDEV_DOWN:
case NETDEV_REBOOT:
- case NETDEV_CHANGE:
case NETDEV_UNREGISTER:
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index c23852835050..39a5b72e861f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1043,7 +1043,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
-void netdev_set_operstate(struct net_device *dev, int newstate)
+void netif_set_operstate(struct net_device *dev, int newstate)
{
unsigned int old = READ_ONCE(dev->operstate);
@@ -1052,9 +1052,9 @@ void netdev_set_operstate(struct net_device *dev, int newstate)
return;
} while (!try_cmpxchg(&dev->operstate, &old, newstate));
- netdev_state_change(dev);
+ netif_state_change(dev);
}
-EXPORT_SYMBOL(netdev_set_operstate);
+EXPORT_SYMBOL(netif_set_operstate);
static void set_operstate(struct net_device *dev, unsigned char transition)
{
@@ -1080,7 +1080,7 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
break;
}
- netdev_set_operstate(dev, operstate);
+ netif_set_operstate(dev, operstate);
}
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
@@ -3027,7 +3027,7 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev,
err = validate_linkmsg(dev, tb, extack);
if (err < 0)
- goto errout;
+ return err;
if (tb[IFLA_IFNAME])
nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
@@ -3396,7 +3396,7 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev,
errout:
if (status & DO_SETLINK_MODIFIED) {
if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
- netdev_state_change(dev);
+ netif_state_change(dev);
if (err < 0)
net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
@@ -3676,8 +3676,11 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
nla_len(tb[IFLA_BROADCAST]));
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
- if (tb[IFLA_OPERSTATE])
+ if (tb[IFLA_OPERSTATE]) {
+ netdev_lock_ops(dev);
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
+ netdev_unlock_ops(dev);
+ }
if (tb[IFLA_LINKMODE])
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
if (tb[IFLA_GROUP])
diff --git a/net/core/sock.c b/net/core/sock.c
index f67a3c5b0988..e54449c9ab0b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2130,6 +2130,8 @@ lenout:
*/
static inline void sock_lock_init(struct sock *sk)
{
+ sk_owner_clear(sk);
+
if (sk->sk_kern_sock)
sock_lock_init_class_and_name(
sk,
@@ -2226,6 +2228,9 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
cgroup_sk_free(&sk->sk_cgrp_data);
mem_cgroup_sk_free(sk);
security_sk_free(sk);
+
+ sk_owner_put(sk);
+
if (slab != NULL)
kmem_cache_free(slab, sk);
else
diff --git a/net/ethtool/cmis.h b/net/ethtool/cmis.h
index 1e790413db0e..4a9a946cabf0 100644
--- a/net/ethtool/cmis.h
+++ b/net/ethtool/cmis.h
@@ -101,7 +101,6 @@ struct ethtool_cmis_cdb_rpl {
};
u32 ethtool_cmis_get_max_lpl_size(u8 num_of_byte_octs);
-u32 ethtool_cmis_get_max_epl_size(u8 num_of_byte_octs);
void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args,
enum ethtool_cmis_cdb_cmd_id cmd, u8 *lpl,
diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c
index d159dc121bde..0e2691ccb0df 100644
--- a/net/ethtool/cmis_cdb.c
+++ b/net/ethtool/cmis_cdb.c
@@ -16,15 +16,6 @@ u32 ethtool_cmis_get_max_lpl_size(u8 num_of_byte_octs)
return 8 * (1 + min_t(u8, num_of_byte_octs, 15));
}
-/* For accessing the EPL field on page 9Fh, the allowable length extension is
- * min(i, 255) byte octets where i specifies the allowable additional number of
- * byte octets in a READ or a WRITE.
- */
-u32 ethtool_cmis_get_max_epl_size(u8 num_of_byte_octs)
-{
- return 8 * (1 + min_t(u8, num_of_byte_octs, 255));
-}
-
void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args,
enum ethtool_cmis_cdb_cmd_id cmd, u8 *lpl,
u8 lpl_len, u8 *epl, u16 epl_len,
@@ -33,19 +24,16 @@ void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args,
{
args->req.id = cpu_to_be16(cmd);
args->req.lpl_len = lpl_len;
- if (lpl) {
+ if (lpl)
memcpy(args->req.payload, lpl, args->req.lpl_len);
- args->read_write_len_ext =
- ethtool_cmis_get_max_lpl_size(read_write_len_ext);
- }
if (epl) {
args->req.epl_len = cpu_to_be16(epl_len);
args->req.epl = epl;
- args->read_write_len_ext =
- ethtool_cmis_get_max_epl_size(read_write_len_ext);
}
args->max_duration = max_duration;
+ args->read_write_len_ext =
+ ethtool_cmis_get_max_lpl_size(read_write_len_ext);
args->msleep_pre_rpl = msleep_pre_rpl;
args->rpl_exp_len = rpl_exp_len;
args->flags = flags;
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 0cb6da1f692a..49bea6b45bd5 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -830,6 +830,7 @@ void ethtool_ringparam_get_cfg(struct net_device *dev,
/* Driver gives us current state, we want to return current config */
kparam->tcp_data_split = dev->cfg->hds_config;
+ kparam->hds_thresh = dev->cfg->hds_thresh;
}
static void ethtool_init_tsinfo(struct kernel_ethtool_ts_info *info)
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 221639407c72..8262cc10f98d 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -60,7 +60,7 @@ static struct devlink *netdev_to_devlink_get(struct net_device *dev)
u32 ethtool_op_get_link(struct net_device *dev)
{
/* Synchronize carrier state with link watch, see also rtnl_getlink() */
- linkwatch_sync_dev(dev);
+ __linkwatch_sync_dev(dev);
return netif_carrier_ok(dev) ? 1 : 0;
}
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index a163d40c6431..977beeaaa2f9 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -500,7 +500,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
netdev_unlock_ops(req_info->dev);
rtnl_unlock();
if (ret < 0)
- goto err_cleanup;
+ goto err_dev;
ret = ops->reply_size(req_info, reply_data);
if (ret < 0)
goto err_cleanup;
@@ -560,7 +560,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
netdev_unlock_ops(dev);
rtnl_unlock();
if (ret < 0)
- goto out;
+ goto out_cancel;
ret = ethnl_fill_reply_header(skb, dev, ctx->ops->hdr_attr);
if (ret < 0)
goto out;
@@ -569,6 +569,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
out:
if (ctx->ops->cleanup_data)
ctx->ops->cleanup_data(ctx->reply_data);
+out_cancel:
ctx->reply_data->dev = NULL;
if (ret < 0)
genlmsg_cancel(skb, ehdr);
@@ -793,7 +794,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
ethnl_init_reply_data(reply_data, ops, dev);
ret = ops->prepare_data(req_info, reply_data, &info);
if (ret < 0)
- goto err_cleanup;
+ goto err_rep;
ret = ops->reply_size(req_info, reply_data);
if (ret < 0)
goto err_cleanup;
@@ -828,6 +829,7 @@ err_skb:
err_cleanup:
if (ops->cleanup_data)
ops->cleanup_data(reply_data);
+err_rep:
kfree(reply_data);
kfree(req_info);
return;
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 439cfb7ad5d1..1b1b700ec05e 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -33,14 +33,14 @@ static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
struct net_device *dev = master->dev;
if (!is_admin_up(dev)) {
- netdev_set_operstate(dev, IF_OPER_DOWN);
+ netif_set_operstate(dev, IF_OPER_DOWN);
return;
}
if (has_carrier)
- netdev_set_operstate(dev, IF_OPER_UP);
+ netif_set_operstate(dev, IF_OPER_UP);
else
- netdev_set_operstate(dev, IF_OPER_LOWERLAYERDOWN);
+ netif_set_operstate(dev, IF_OPER_LOWERLAYERDOWN);
}
static bool hsr_check_carrier(struct hsr_port *master)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2cffb8f4a2bc..9ba83f0c9928 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3154,12 +3154,13 @@ int addrconf_add_ifaddr(struct net *net, void __user *arg)
rtnl_net_lock(net);
dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
- netdev_lock_ops(dev);
- if (dev)
+ if (dev) {
+ netdev_lock_ops(dev);
err = inet6_addr_add(net, dev, &cfg, 0, 0, NULL);
- else
+ netdev_unlock_ops(dev);
+ } else {
err = -ENODEV;
- netdev_unlock_ops(dev);
+ }
rtnl_net_unlock(net);
return err;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ab12b816ab94..210b84cecc24 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -470,10 +470,10 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
goto out;
hash = fl6->mp_hash;
- if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound) &&
- rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
- strict) >= 0) {
- match = first;
+ if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound)) {
+ if (rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
+ strict) >= 0)
+ match = first;
goto out;
}
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 409bd415ef1d..24c2de1891bd 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -899,13 +899,17 @@ create_child:
goto dispose_child;
}
- if (!subflow_hmac_valid(req, &mp_opt) ||
- !mptcp_can_accept_new_subflow(subflow_req->msk)) {
+ if (!subflow_hmac_valid(req, &mp_opt)) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
goto dispose_child;
}
+ if (!mptcp_can_accept_new_subflow(owner)) {
+ subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
+ goto dispose_child;
+ }
+
/* move the msk reference ownership to the subflow */
subflow_req->msk = NULL;
ctx->conn = (struct sock *)owner;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index df2dc21304ef..047ba81865ed 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -212,7 +212,7 @@ config NF_CT_PROTO_SCTP
bool 'SCTP protocol connection tracking support'
depends on NETFILTER_ADVANCED
default y
- select LIBCRC32C
+ select CRC32
help
With this option enabled, the layer 3 independent connection
tracking code will be able to do state tracking on SCTP connections.
@@ -475,7 +475,7 @@ endif # NF_CONNTRACK
config NF_TABLES
select NETFILTER_NETLINK
- select LIBCRC32C
+ select CRC32
tristate "Netfilter nf_tables support"
help
nftables is the new packet classification framework that intends to
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 2a3017b9c001..8c5b1fe12d07 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -105,7 +105,7 @@ config IP_VS_PROTO_AH
config IP_VS_PROTO_SCTP
bool "SCTP load balancing support"
- select LIBCRC32C
+ select CRC32
help
This option enables support for load balancing SCTP transport
protocol. Say Y if unsure.
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index b8d3c3213efe..c15db28c5ebc 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -994,8 +994,9 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 8, pkt[8], bsize);
NFT_PIPAPO_AVX2_AND(6, 2, 3);
+ NFT_PIPAPO_AVX2_AND(3, 4, 7);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 9, pkt[9], bsize);
- NFT_PIPAPO_AVX2_AND(0, 4, 5);
+ NFT_PIPAPO_AVX2_AND(0, 3, 5);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 10, pkt[10], bsize);
NFT_PIPAPO_AVX2_AND(2, 6, 7);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 11, pkt[11], bsize);
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 2535f3f9f462..5481bd561eb4 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -11,7 +11,7 @@ config OPENVSWITCH
(!NF_NAT || NF_NAT) && \
(!NETFILTER_CONNCOUNT || NETFILTER_CONNCOUNT)))
depends on PSAMPLE || !PSAMPLE
- select LIBCRC32C
+ select CRC32
select MPLS
select NET_MPLS_GSO
select DST_CACHE
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 8180d0c12fce..a800127effcd 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -784,7 +784,7 @@ config NET_ACT_SKBEDIT
config NET_ACT_CSUM
tristate "Checksum Updating"
depends on NET_CLS_ACT && INET
- select LIBCRC32C
+ select CRC32
help
Say Y here to update some common checksum after some direct
packet alterations.
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 4f648af8cfaa..ecec0a1e1c1a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -2057,6 +2057,7 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
struct tcmsg *tcm;
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
+ int ret = -EMSGSIZE;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
if (!nlh)
@@ -2101,11 +2102,45 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
return skb->len;
+cls_op_not_supp:
+ ret = -EOPNOTSUPP;
out_nlmsg_trim:
nla_put_failure:
-cls_op_not_supp:
nlmsg_trim(skb, b);
- return -1;
+ return ret;
+}
+
+static struct sk_buff *tfilter_notify_prep(struct net *net,
+ struct sk_buff *oskb,
+ struct nlmsghdr *n,
+ struct tcf_proto *tp,
+ struct tcf_block *block,
+ struct Qdisc *q, u32 parent,
+ void *fh, int event,
+ u32 portid, bool rtnl_held,
+ struct netlink_ext_ack *extack)
+{
+ unsigned int size = oskb ? max(NLMSG_GOODSIZE, oskb->len) : NLMSG_GOODSIZE;
+ struct sk_buff *skb;
+ int ret;
+
+retry:
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return ERR_PTR(-ENOBUFS);
+
+ ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+ n->nlmsg_seq, n->nlmsg_flags, event, false,
+ rtnl_held, extack);
+ if (ret <= 0) {
+ kfree_skb(skb);
+ if (ret == -EMSGSIZE) {
+ size += NLMSG_GOODSIZE;
+ goto retry;
+ }
+ return ERR_PTR(-EINVAL);
+ }
+ return skb;
}
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
@@ -2121,16 +2156,10 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
return 0;
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!skb)
- return -ENOBUFS;
-
- if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
- n->nlmsg_seq, n->nlmsg_flags, event,
- false, rtnl_held, extack) <= 0) {
- kfree_skb(skb);
- return -EINVAL;
- }
+ skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event,
+ portid, rtnl_held, extack);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
if (unicast)
err = rtnl_unicast(skb, net, portid);
@@ -2153,16 +2182,11 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
return tp->ops->delete(tp, fh, last, rtnl_held, extack);
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!skb)
- return -ENOBUFS;
-
- if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
- n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
- false, rtnl_held, extack) <= 0) {
+ skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh,
+ RTM_DELTFILTER, portid, rtnl_held, extack);
+ if (IS_ERR(skb)) {
NL_SET_ERR_MSG(extack, "Failed to build del event notification");
- kfree_skb(skb);
- return -EINVAL;
+ return PTR_ERR(skb);
}
err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
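tfilter_notify_prep() retries with a larger skb when tcf_fill_node() reports -EMSGSIZE instead of failing outright. The same grow-and-retry shape in plain C (format_msg() is illustrative):

#include <stdio.h>
#include <stdlib.h>

static char *format_msg(const char *payload)
{
	size_t size = 16;		/* deliberately small first guess */
	char *buf;
	int n;

retry:
	buf = malloc(size);
	if (!buf)
		return NULL;

	n = snprintf(buf, size, "notify: %s", payload);
	if (n >= (int)size) {		/* cf. -EMSGSIZE from the fill step */
		free(buf);
		size *= 2;
		goto retry;
	}
	return buf;
}

int main(void)
{
	char *msg = format_msg("a payload longer than sixteen bytes");

	if (msg) {
		puts(msg);
		free(msg);
	}
	return 0;
}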
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 81189d02fee7..12dd71139da3 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -65,10 +65,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
&q->stats, qdisc_pkt_len, codel_get_enqueue_time,
drop_func, dequeue_func);
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- * or HTB crashes. Defer it for next round.
- */
- if (q->stats.drop_count && sch->q.qlen) {
+ if (q->stats.drop_count) {
qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
q->stats.drop_count = 0;
q->stats.drop_len = 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index c69b999fae17..e0a81d313aa7 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -105,6 +105,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return -ENOBUFS;
gnet_stats_basic_sync_init(&cl->bstats);
+ INIT_LIST_HEAD(&cl->alist);
cl->common.classid = classid;
cl->quantum = quantum;
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
@@ -229,7 +230,7 @@ static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
{
struct drr_class *cl = (struct drr_class *)arg;
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
}
static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
@@ -390,7 +391,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
if (unlikely(skb == NULL))
goto out;
if (cl->qdisc->q.qlen == 0)
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
bstats_update(&cl->bstats, skb);
qdisc_bstats_update(sch, skb);
@@ -431,7 +432,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (cl->qdisc->q.qlen)
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
qdisc_reset(cl->qdisc);
}
}
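The qdisc fixes above rely on a property of list_del_init(): unlike list_del(), it re-points the entry at itself, so a later list_empty() reports "not queued" and a guarded second removal is a no-op. A self-contained miniature of the <linux/list.h> idiom:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

/* Unlink and reinitialize, so the entry can be tested and removed again. */
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

int main(void)
{
	struct list_head active, cl;

	INIT_LIST_HEAD(&active);
	INIT_LIST_HEAD(&cl);
	list_add(&cl, &active);
	list_del_init(&cl);
	if (!list_empty(&cl))	/* guard, cf. qfq_qlen_notify() */
		list_del_init(&cl);
	printf("queued=%d\n", !list_empty(&cl));
	return 0;
}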
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index 516038a44163..c3bdeb14185b 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -293,7 +293,7 @@ static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
* to remove them.
*/
if (!ets_class_is_strict(q, cl) && sch->q.qlen)
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
}
static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
@@ -488,7 +488,7 @@ static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
if (unlikely(!skb))
goto out;
if (cl->qdisc->q.qlen == 0)
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
return ets_qdisc_dequeue_skb(sch, skb);
}
@@ -657,7 +657,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
}
for (i = q->nbands; i < oldbands; i++) {
if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
- list_del(&q->classes[i].alist);
+ list_del_init(&q->classes[i].alist);
qdisc_tree_flush_backlog(q->classes[i].qdisc);
}
WRITE_ONCE(q->nstrict, nstrict);
@@ -713,7 +713,7 @@ static void ets_qdisc_reset(struct Qdisc *sch)
for (band = q->nstrict; band < q->nbands; band++) {
if (q->classes[band].qdisc->q.qlen)
- list_del(&q->classes[band].alist);
+ list_del_init(&q->classes[band].alist);
}
for (band = 0; band < q->nbands; band++)
qdisc_reset(q->classes[band].qdisc);
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 799f5397ad4c..6c9029f71e88 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -315,10 +315,8 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- * or HTB crashes. Defer it for next round.
- */
- if (q->cstats.drop_count && sch->q.qlen) {
+
+ if (q->cstats.drop_count) {
qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
q->cstats.drop_len);
q->cstats.drop_count = 0;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index c287bf8423b4..ce5045eea065 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -203,7 +203,10 @@ eltree_insert(struct hfsc_class *cl)
static inline void
eltree_remove(struct hfsc_class *cl)
{
- rb_erase(&cl->el_node, &cl->sched->eligible);
+ if (!RB_EMPTY_NODE(&cl->el_node)) {
+ rb_erase(&cl->el_node, &cl->sched->eligible);
+ RB_CLEAR_NODE(&cl->el_node);
+ }
}
static inline void
@@ -1220,7 +1223,8 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
/* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
* needs to be called explicitly to remove a class from vttree.
*/
- update_vf(cl, 0, 0);
+ if (cl->cl_nactive)
+ update_vf(cl, 0, 0);
if (cl->cl_flags & HFSC_RSC)
eltree_remove(cl);
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c31bc5489bdd..4b9a639b642e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1485,6 +1485,8 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
+ if (!cl->prio_activity)
+ return;
htb_deactivate(qdisc_priv(sch), cl);
}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 2cfbc977fe6d..687a932eb9b2 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -347,7 +347,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
struct qfq_aggregate *agg = cl->agg;
- list_del(&cl->alist); /* remove from RR queue of the aggregate */
+ list_del_init(&cl->alist); /* remove from RR queue of the aggregate */
if (list_empty(&agg->active)) /* agg is now inactive */
qfq_deactivate_agg(q, agg);
}
@@ -474,6 +474,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
gnet_stats_basic_sync_init(&cl->bstats);
cl->common.classid = classid;
cl->deficit = lmax;
+ INIT_LIST_HEAD(&cl->alist);
cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
classid, NULL);
@@ -982,7 +983,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
cl->deficit -= (int) len;
if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
cl->deficit += agg->lmax;
list_move_tail(&cl->alist, &agg->active);
@@ -1415,6 +1416,8 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl = (struct qfq_class *)arg;
+ if (list_empty(&cl->alist))
+ return;
qfq_deactivate_class(q, cl);
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 9ed197e96396..b912ad99aa15 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -631,6 +631,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
struct red_parms *p = NULL;
struct sk_buff *to_free = NULL;
struct sk_buff *tail = NULL;
+ unsigned int maxflows;
+ unsigned int quantum;
+ unsigned int divisor;
+ int perturb_period;
+ u8 headdrop;
+ u8 maxdepth;
+ int limit;
+ u8 flags;
+
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
return -EINVAL;
@@ -652,39 +661,64 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
if (!p)
return -ENOMEM;
}
- if (ctl->limit == 1) {
- NL_SET_ERR_MSG_MOD(extack, "invalid limit");
- return -EINVAL;
- }
+
sch_tree_lock(sch);
+
+ limit = q->limit;
+ divisor = q->divisor;
+ headdrop = q->headdrop;
+ maxdepth = q->maxdepth;
+ maxflows = q->maxflows;
+ perturb_period = q->perturb_period;
+ quantum = q->quantum;
+ flags = q->flags;
+
+ /* update and validate configuration */
if (ctl->quantum)
- q->quantum = ctl->quantum;
- WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ);
+ quantum = ctl->quantum;
+ perturb_period = ctl->perturb_period * HZ;
if (ctl->flows)
- q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
+ maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
if (ctl->divisor) {
- q->divisor = ctl->divisor;
- q->maxflows = min_t(u32, q->maxflows, q->divisor);
+ divisor = ctl->divisor;
+ maxflows = min_t(u32, maxflows, divisor);
}
if (ctl_v1) {
if (ctl_v1->depth)
- q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
+ maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
if (p) {
- swap(q->red_parms, p);
- red_set_parms(q->red_parms,
+ red_set_parms(p,
ctl_v1->qth_min, ctl_v1->qth_max,
ctl_v1->Wlog,
ctl_v1->Plog, ctl_v1->Scell_log,
NULL,
ctl_v1->max_P);
}
- q->flags = ctl_v1->flags;
- q->headdrop = ctl_v1->headdrop;
+ flags = ctl_v1->flags;
+ headdrop = ctl_v1->headdrop;
}
if (ctl->limit) {
- q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
- q->maxflows = min_t(u32, q->maxflows, q->limit);
+ limit = min_t(u32, ctl->limit, maxdepth * maxflows);
+ maxflows = min_t(u32, maxflows, limit);
}
+ if (limit == 1) {
+ sch_tree_unlock(sch);
+ kfree(p);
+ NL_SET_ERR_MSG_MOD(extack, "invalid limit");
+ return -EINVAL;
+ }
+
+ /* commit configuration */
+ q->limit = limit;
+ q->divisor = divisor;
+ q->headdrop = headdrop;
+ q->maxdepth = maxdepth;
+ q->maxflows = maxflows;
+ WRITE_ONCE(q->perturb_period, perturb_period);
+ q->quantum = quantum;
+ q->flags = flags;
+ if (p)
+ swap(q->red_parms, p);
qlen = sch->q.qlen;
while (sch->q.qlen > q->limit) {
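sfq_change() is reworked to stage every knob in locals, validate the combined result, and only then copy it into the live qdisc, so a rejected request (limit == 1) leaves the old configuration intact. The stage/validate/commit shape in miniature (struct cfg and change_cfg() are illustrative):

#include <stdio.h>

struct cfg {
	unsigned int limit, maxflows;
};

static int change_cfg(struct cfg *live, unsigned int new_limit)
{
	struct cfg tmp = *live;		/* stage the current values */

	if (new_limit)
		tmp.limit = new_limit;
	if (tmp.maxflows > tmp.limit)
		tmp.maxflows = tmp.limit;

	if (tmp.limit == 1)		/* validate the combined result */
		return -1;

	*live = tmp;			/* commit only after validation */
	return 0;
}

int main(void)
{
	struct cfg c = { .limit = 127, .maxflows = 128 };
	int rc;

	rc = change_cfg(&c, 1);		/* rejected, c untouched */
	printf("rc=%d limit=%u\n", rc, c.limit);
	rc = change_cfg(&c, 64);	/* accepted and committed */
	printf("rc=%d limit=%u maxflows=%u\n", rc, c.limit, c.maxflows);
	return 0;
}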
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 5da599ff84a9..d18a72df3654 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -7,10 +7,10 @@ menuconfig IP_SCTP
tristate "The SCTP Protocol"
depends on INET
depends on IPV6 || IPV6=n
+ select CRC32
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA1
- select LIBCRC32C
select NET_UDP_TUNNEL
help
Stream Control Transmission Protocol
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 36ee34f483d7..53725ee7ba06 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -72,8 +72,9 @@
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(const struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
-static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
- size_t msg_len);
+static int sctp_wait_for_sndbuf(struct sctp_association *asoc,
+ struct sctp_transport *transport,
+ long *timeo_p, size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -1828,7 +1829,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
- err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
+ err = sctp_wait_for_sndbuf(asoc, transport, &timeo, msg_len);
if (err)
goto err;
if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
@@ -9214,8 +9215,9 @@ void sctp_sock_rfree(struct sk_buff *skb)
/* Helper function to wait for space in the sndbuf. */
-static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
- size_t msg_len)
+static int sctp_wait_for_sndbuf(struct sctp_association *asoc,
+ struct sctp_transport *transport,
+ long *timeo_p, size_t msg_len)
{
struct sock *sk = asoc->base.sk;
long current_timeo = *timeo_p;
@@ -9225,7 +9227,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
*timeo_p, msg_len);
- /* Increment the association's refcnt. */
+ /* Increment the transport and association's refcnt. */
+ if (transport)
+ sctp_transport_hold(transport);
sctp_association_hold(asoc);
/* Wait on the association specific sndbuf space. */
@@ -9234,7 +9238,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
TASK_INTERRUPTIBLE);
if (asoc->base.dead)
goto do_dead;
- if (!*timeo_p)
+ if ((!*timeo_p) || (transport && transport->dead))
goto do_nonblock;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
goto do_error;
@@ -9259,7 +9263,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
out:
finish_wait(&asoc->wait, &wait);
- /* Release the association's refcnt. */
+ /* Release the transport and association's refcnt. */
+ if (transport)
+ sctp_transport_put(transport);
sctp_association_put(asoc);
return err;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 59675f6d9e7d..6946c1462793 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -117,6 +117,8 @@ fail:
*/
void sctp_transport_free(struct sctp_transport *transport)
{
+ transport->dead = 1;
+
/* Try to delete the heartbeat timer. */
if (timer_delete(&transport->hb_timer))
sctp_transport_put(transport);
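The sctp hunks make sctp_transport_free() set ->dead and teach the sndbuf waiter to pin the transport and bail out when it wakes to find it dead. A minimal sketch of that pin-and-recheck pattern (all names illustrative; the sleep is simulated by loop iterations):

#include <stdio.h>

struct transport {
	int refcount;
	int dead;
};

static void transport_hold(struct transport *t) { t->refcount++; }
static void transport_put(struct transport *t) { t->refcount--; }

static int wait_for_space(struct transport *t)
{
	int err = 0;

	transport_hold(t);		/* pin across the (simulated) sleep */
	for (int wakeup = 0; wakeup < 3; wakeup++) {
		if (t->dead) {		/* teardown ran while we slept */
			err = -1;
			break;
		}
		if (wakeup == 1)
			t->dead = 1;	/* simulate sctp_transport_free() */
	}
	transport_put(t);		/* safe: our reference kept t alive */
	return err;
}

int main(void)
{
	struct transport t = { .refcount = 1, .dead = 0 };

	printf("err=%d refcount=%d\n", wait_for_space(&t), t.refcount);
	return 0;
}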
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 50c2e0846ea4..18be6ff4c3db 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1046,6 +1046,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
if (imp == TIPC_SYSTEM_IMPORTANCE) {
pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
+ __skb_queue_purge(list);
return -ENOBUFS;
}
rc = link_schedule_user(l, hdr);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index cb86b0bf9a53..a3ccb3135e51 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -852,6 +852,11 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
return do_tls_setsockopt(sk, optname, optval, optlen);
}
+static int tls_disconnect(struct sock *sk, int flags)
+{
+ return -EOPNOTSUPP;
+}
+
struct tls_context *tls_ctx_create(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -947,6 +952,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
prot[TLS_BASE][TLS_BASE] = *base;
prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
+ prot[TLS_BASE][TLS_BASE].disconnect = tls_disconnect;
prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;
prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 33d861c04ebd..3ce7b54003c2 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -522,7 +522,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
case INAT_PFX_REPNE:
if (modrm == 0xca)
/* eretu/erets */
- insn->type = INSN_CONTEXT_SWITCH;
+ insn->type = INSN_SYSRET;
break;
default:
if (modrm == 0xca)
@@ -535,11 +535,15 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
insn->type = INSN_JUMP_CONDITIONAL;
- } else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
- op2 == 0x35) {
+ } else if (op2 == 0x05 || op2 == 0x34) {
- /* sysenter, sysret */
- insn->type = INSN_CONTEXT_SWITCH;
+ /* syscall, sysenter */
+ insn->type = INSN_SYSCALL;
+
+ } else if (op2 == 0x07 || op2 == 0x35) {
+
+ /* sysret, sysexit */
+ insn->type = INSN_SYSRET;
} else if (op2 == 0x0b || op2 == 0xb9) {
@@ -676,7 +680,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
case 0xca: /* retf */
case 0xcb: /* retf */
- insn->type = INSN_CONTEXT_SWITCH;
+ insn->type = INSN_SYSRET;
break;
case 0xe0: /* loopne */
@@ -721,7 +725,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
} else if (modrm_reg == 5) {
/* jmpf */
- insn->type = INSN_CONTEXT_SWITCH;
+ insn->type = INSN_SYSRET;
} else if (modrm_reg == 6) {
diff --git a/tools/objtool/arch/x86/special.c b/tools/objtool/arch/x86/special.c
index 403e587676f1..06ca4a2659a4 100644
--- a/tools/objtool/arch/x86/special.c
+++ b/tools/objtool/arch/x86/special.c
@@ -126,7 +126,7 @@ struct reloc *arch_find_switch_table(struct objtool_file *file,
* indicates a rare GCC quirk/bug which can leave dead
* code behind.
*/
- if (reloc_type(text_reloc) == R_X86_64_PC32) {
+ if (!file->ignore_unreachables && reloc_type(text_reloc) == R_X86_64_PC32) {
WARN_INSN(insn, "ignoring unreachables due to jump table quirk");
file->ignore_unreachables = true;
}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 4a1f6c3169b3..b649049b6a11 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -3505,6 +3505,34 @@ next_orig:
return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}
+static bool skip_alt_group(struct instruction *insn)
+{
+ struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;
+
+ /* ANNOTATE_IGNORE_ALTERNATIVE */
+ if (insn->alt_group && insn->alt_group->ignore)
+ return true;
+
+ /*
+ * For NOP patched with CLAC/STAC, only follow the latter to avoid
+ * impossible code paths combining patched CLAC with unpatched STAC
+ * or vice versa.
+ *
+ * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
+ * requested not to do that to avoid hurting .s file readability
+ * around CLAC/STAC alternative sites.
+ */
+
+ if (!alt_insn)
+ return false;
+
+ /* Don't override ASM_{CLAC,STAC}_UNSAFE */
+ if (alt_insn->alt_group && alt_insn->alt_group->ignore)
+ return false;
+
+ return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
+}
+
/*
* Follow the branch starting at the given instruction, and recursively follow
* any other branches (jumps). Meanwhile, track the frame pointer state at
@@ -3625,7 +3653,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
}
}
- if (insn->alt_group && insn->alt_group->ignore)
+ if (skip_alt_group(insn))
return 0;
if (handle_insn_ops(insn, next_insn, &state))
@@ -3684,14 +3712,20 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
break;
- case INSN_CONTEXT_SWITCH:
- if (func) {
- if (!next_insn || !next_insn->hint) {
- WARN_INSN(insn, "unsupported instruction in callable function");
- return 1;
- }
- break;
+ case INSN_SYSCALL:
+ if (func && (!next_insn || !next_insn->hint)) {
+ WARN_INSN(insn, "unsupported instruction in callable function");
+ return 1;
+ }
+
+ break;
+
+ case INSN_SYSRET:
+ if (func && (!next_insn || !next_insn->hint)) {
+ WARN_INSN(insn, "unsupported instruction in callable function");
+ return 1;
}
+
return 0;
case INSN_STAC:
@@ -3886,6 +3920,12 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
WARN_INSN(insn, "RET before UNTRAIN");
return 1;
+ case INSN_SYSCALL:
+ break;
+
+ case INSN_SYSRET:
+ return 0;
+
case INSN_NOP:
if (insn->retpoline_safe)
return 0;
@@ -3895,6 +3935,9 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
break;
}
+ if (insn->dead_end)
+ return 0;
+
if (!next) {
WARN_INSN(insn, "teh end!");
return 1;
diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h
index 089a1acc48a8..01ef6f415adf 100644
--- a/tools/objtool/include/objtool/arch.h
+++ b/tools/objtool/include/objtool/arch.h
@@ -19,7 +19,8 @@ enum insn_type {
INSN_CALL,
INSN_CALL_DYNAMIC,
INSN_RETURN,
- INSN_CONTEXT_SWITCH,
+ INSN_SYSCALL,
+ INSN_SYSRET,
INSN_BUG,
INSN_NOP,
INSN_STAC,
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index da53a709773a..c176487356e6 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -809,6 +809,10 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
test.log.extend(parse_diagnostic(lines))
if test.name != "" and not peek_test_name_match(lines, test):
test.add_error(printer, 'missing subtest result line!')
+ elif not lines:
+ print_log(test.log, printer)
+ test.status = TestStatus.NO_TESTS
+ test.add_error(printer, 'No more test results!')
else:
parse_test_result(lines, test, expected_num, printer)
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 5ff4f6ffd873..bbba921e0eac 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -371,8 +371,8 @@ class KUnitParserTest(unittest.TestCase):
"""
result = kunit_parser.parse_run_tests(output.splitlines(), stdout)
# Missing test results after test plan should alert a suspected test crash.
- self.assertEqual(kunit_parser.TestStatus.TEST_CRASHED, result.status)
- self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, crashed=1, errors=1))
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, errors=2))
def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream:
return kunit_parser.LineStream(enumerate(strs, start=1))
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 1cf82acb2a3e..0ab4b53bb4f3 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -24,4 +24,10 @@ static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
+static inline unsigned long free_reserved_area(void *start, void *end,
+ int poison, const char *s)
+{
+ return 0;
+}
+
#endif
diff --git a/tools/testing/memblock/linux/mutex.h b/tools/testing/memblock/linux/mutex.h
new file mode 100644
index 000000000000..ae3f497165d6
--- /dev/null
+++ b/tools/testing/memblock/linux/mutex.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MUTEX_H
+#define _MUTEX_H
+
+#define DEFINE_MUTEX(name) int name
+
+static inline void dummy_mutex_guard(int *name)
+{
+}
+
+#define guard(mutex) \
+ dummy_##mutex##_guard
+
+#endif /* _MUTEX_H */ \ No newline at end of file
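The new memblock test stub makes the kernel's guard(mutex)(&m) cleanup idiom compile in userspace by token-pasting guard(mutex) into a no-op function call. The expansion, demonstrated standalone (the in-kernel guard(), from linux/cleanup.h, takes a real scoped lock instead):

#include <stdio.h>

#define DEFINE_MUTEX(name) int name

static inline void dummy_mutex_guard(int *name)
{
}

/* guard(mutex)(&m) pastes into dummy_mutex_guard(&m): a no-op here. */
#define guard(mutex) dummy_##mutex##_guard

DEFINE_MUTEX(demo_lock);

int main(void)
{
	guard(mutex)(&demo_lock);
	puts("guard(mutex) expanded to a no-op call");
	return 0;
}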
diff --git a/tools/testing/selftests/.gitignore b/tools/testing/selftests/.gitignore
index cb24124ac5b9..674aaa02e396 100644
--- a/tools/testing/selftests/.gitignore
+++ b/tools/testing/selftests/.gitignore
@@ -4,7 +4,6 @@ gpiogpio-hammer
gpioinclude/
gpiolsgpio
kselftest_install/
-tpm2/SpaceTest.log
# Python bytecode and cache
__pycache__/
diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
index 5680befae8c6..5e713ef7caa3 100644
--- a/tools/testing/selftests/bpf/config.x86_64
+++ b/tools/testing/selftests/bpf/config.x86_64
@@ -39,7 +39,6 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPUSETS=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_BLAKE2B=y
CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_XXHASH=y
diff --git a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
index 115287ba441b..0703e987df89 100644
--- a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
@@ -25,8 +25,11 @@ static void *spin_lock_thread(void *arg)
while (!READ_ONCE(skip)) {
err = bpf_prog_test_run_opts(prog_fd, &topts);
- ASSERT_OK(err, "test_run");
- ASSERT_OK(topts.retval, "test_run retval");
+ if (err || topts.retval) {
+ ASSERT_OK(err, "test_run");
+ ASSERT_OK(topts.retval, "test_run retval");
+ break;
+ }
}
pthread_exit(arg);
}
diff --git a/tools/testing/selftests/bpf/progs/res_spin_lock.c b/tools/testing/selftests/bpf/progs/res_spin_lock.c
index b33385dfbd35..22c4fb8b9266 100644
--- a/tools/testing/selftests/bpf/progs/res_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/res_spin_lock.c
@@ -38,13 +38,14 @@ int res_spin_lock_test(struct __sk_buff *ctx)
r = bpf_res_spin_lock(&elem1->lock);
if (r)
return r;
- if (!bpf_res_spin_lock(&elem2->lock)) {
+ r = bpf_res_spin_lock(&elem2->lock);
+ if (!r) {
bpf_res_spin_unlock(&elem2->lock);
bpf_res_spin_unlock(&elem1->lock);
return -1;
}
bpf_res_spin_unlock(&elem1->lock);
- return 0;
+ return r != -EDEADLK;
}
SEC("tc")
@@ -124,12 +125,15 @@ int res_spin_lock_test_held_lock_max(struct __sk_buff *ctx)
/* Trigger AA, after exhausting entries in the held lock table. This
* time, only the timeout can save us, as AA detection won't succeed.
*/
- if (!bpf_res_spin_lock(locks[34])) {
+ ret = bpf_res_spin_lock(locks[34]);
+ if (!ret) {
bpf_res_spin_unlock(locks[34]);
ret = 1;
goto end;
}
+ ret = ret != -ETIMEDOUT ? 2 : 0;
+
end:
for (i = i - 1; i >= 0; i--)
bpf_res_spin_unlock(locks[i]);
diff --git a/tools/testing/selftests/cgroup/test_cpuset_prs.sh b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
index 400a696a0d21..a17256d9f88a 100755
--- a/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+++ b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
@@ -88,22 +88,32 @@ echo "" > test/cpuset.cpus
# If isolated CPUs have been reserved at boot time (as shown in
# cpuset.cpus.isolated), these isolated CPUs should be outside of CPUs 0-8
# that will be used by this script for testing purpose. If not, some of
-# the tests may fail incorrectly. These pre-isolated CPUs should stay in
-# an isolated state throughout the testing process for now.
+# the tests may fail incorrectly. Wait a bit and retry in case these
+# isolated CPUs are left over from a previous run and have just been
+# cleaned up earlier in this script.
+#
+# These pre-isolated CPUs should stay in an isolated state throughout the
+# testing process for now.
#
BOOT_ISOLCPUS=$(cat $CGROUP2/cpuset.cpus.isolated)
+[[ -n "$BOOT_ISOLCPUS" ]] && {
+ sleep 0.5
+ BOOT_ISOLCPUS=$(cat $CGROUP2/cpuset.cpus.isolated)
+}
if [[ -n "$BOOT_ISOLCPUS" ]]
then
[[ $(echo $BOOT_ISOLCPUS | sed -e "s/[,-].*//") -le 8 ]] &&
skip_test "Pre-isolated CPUs ($BOOT_ISOLCPUS) overlap CPUs to be tested"
echo "Pre-isolated CPUs: $BOOT_ISOLCPUS"
fi
+
cleanup()
{
online_cpus
cd $CGROUP2
- rmdir A1/A2/A3 A1/A2 A1 B1 > /dev/null 2>&1
- rmdir test > /dev/null 2>&1
+ rmdir A1/A2/A3 A1/A2 A1 B1 test/A1 test/B1 test > /dev/null 2>&1
+ rmdir rtest/p1/c11 rtest/p1/c12 rtest/p2/c21 \
+ rtest/p2/c22 rtest/p1 rtest/p2 rtest > /dev/null 2>&1
[[ -n "$SCHED_DEBUG" ]] &&
echo "$SCHED_DEBUG" > /sys/kernel/debug/sched/verbose
}
@@ -173,14 +183,22 @@ test_add_proc()
#
# Cgroup test hierarchy
#
-# root -- A1 -- A2 -- A3
-# +- B1
+# root
+# |
+# +------+------+
+# | |
+# A1 B1
+# |
+# A2
+# |
+# A3
#
# P<v> = set cpus.partition (0:member, 1:root, 2:isolated)
# C<l> = add cpu-list to cpuset.cpus
# X<l> = add cpu-list to cpuset.cpus.exclusive
# S<p> = use prefix in subtree_control
# T = put a task into cgroup
+# CX<l> = add cpu-list to both cpuset.cpus and cpuset.cpus.exclusive
# O<c>=<v> = Write <v> to CPU online file of <c>
#
# ECPUs - effective CPUs of cpusets
@@ -207,130 +225,129 @@ TEST_MATRIX=(
" C0-1:P1 . . C2-3 S+:C4-5 . . . 0 A1:4-5"
" C0-1 . . C2-3:P1 . . . C2 0 "
" C0-1 . . C2-3:P1 . . . C4-5 0 B1:4-5"
- "C0-3:P1:S+ C2-3:P1 . . . . . . 0 A1:0-1,A2:2-3"
- "C0-3:P1:S+ C2-3:P1 . . C1-3 . . . 0 A1:1,A2:2-3"
- "C2-3:P1:S+ C3:P1 . . C3 . . . 0 A1:,A2:3 A1:P1,A2:P1"
- "C2-3:P1:S+ C3:P1 . . C3 P0 . . 0 A1:3,A2:3 A1:P1,A2:P0"
- "C2-3:P1:S+ C2:P1 . . C2-4 . . . 0 A1:3-4,A2:2"
- "C2-3:P1:S+ C3:P1 . . C3 . . C0-2 0 A1:,B1:0-2 A1:P1,A2:P1"
- "$SETUP_A123_PARTITIONS . C2-3 . . . 0 A1:,A2:2,A3:3 A1:P1,A2:P1,A3:P1"
+ "C0-3:P1:S+ C2-3:P1 . . . . . . 0 A1:0-1|A2:2-3|XA2:2-3"
+ "C0-3:P1:S+ C2-3:P1 . . C1-3 . . . 0 A1:1|A2:2-3|XA2:2-3"
+ "C2-3:P1:S+ C3:P1 . . C3 . . . 0 A1:|A2:3|XA2:3 A1:P1|A2:P1"
+ "C2-3:P1:S+ C3:P1 . . C3 P0 . . 0 A1:3|A2:3 A1:P1|A2:P0"
+ "C2-3:P1:S+ C2:P1 . . C2-4 . . . 0 A1:3-4|A2:2"
+ "C2-3:P1:S+ C3:P1 . . C3 . . C0-2 0 A1:|B1:0-2 A1:P1|A2:P1"
+ "$SETUP_A123_PARTITIONS . C2-3 . . . 0 A1:|A2:2|A3:3 A1:P1|A2:P1|A3:P1"
# CPU offlining cases:
- " C0-1 . . C2-3 S+ C4-5 . O2=0 0 A1:0-1,B1:3"
- "C0-3:P1:S+ C2-3:P1 . . O2=0 . . . 0 A1:0-1,A2:3"
- "C0-3:P1:S+ C2-3:P1 . . O2=0 O2=1 . . 0 A1:0-1,A2:2-3"
- "C0-3:P1:S+ C2-3:P1 . . O1=0 . . . 0 A1:0,A2:2-3"
- "C0-3:P1:S+ C2-3:P1 . . O1=0 O1=1 . . 0 A1:0-1,A2:2-3"
- "C2-3:P1:S+ C3:P1 . . O3=0 O3=1 . . 0 A1:2,A2:3 A1:P1,A2:P1"
- "C2-3:P1:S+ C3:P2 . . O3=0 O3=1 . . 0 A1:2,A2:3 A1:P1,A2:P2"
- "C2-3:P1:S+ C3:P1 . . O2=0 O2=1 . . 0 A1:2,A2:3 A1:P1,A2:P1"
- "C2-3:P1:S+ C3:P2 . . O2=0 O2=1 . . 0 A1:2,A2:3 A1:P1,A2:P2"
- "C2-3:P1:S+ C3:P1 . . O2=0 . . . 0 A1:,A2:3 A1:P1,A2:P1"
- "C2-3:P1:S+ C3:P1 . . O3=0 . . . 0 A1:2,A2: A1:P1,A2:P1"
- "C2-3:P1:S+ C3:P1 . . T:O2=0 . . . 0 A1:3,A2:3 A1:P1,A2:P-1"
- "C2-3:P1:S+ C3:P1 . . . T:O3=0 . . 0 A1:2,A2:2 A1:P1,A2:P-1"
- "$SETUP_A123_PARTITIONS . O1=0 . . . 0 A1:,A2:2,A3:3 A1:P1,A2:P1,A3:P1"
- "$SETUP_A123_PARTITIONS . O2=0 . . . 0 A1:1,A2:,A3:3 A1:P1,A2:P1,A3:P1"
- "$SETUP_A123_PARTITIONS . O3=0 . . . 0 A1:1,A2:2,A3: A1:P1,A2:P1,A3:P1"
- "$SETUP_A123_PARTITIONS . T:O1=0 . . . 0 A1:2-3,A2:2-3,A3:3 A1:P1,A2:P-1,A3:P-1"
- "$SETUP_A123_PARTITIONS . . T:O2=0 . . 0 A1:1,A2:3,A3:3 A1:P1,A2:P1,A3:P-1"
- "$SETUP_A123_PARTITIONS . . . T:O3=0 . 0 A1:1,A2:2,A3:2 A1:P1,A2:P1,A3:P-1"
- "$SETUP_A123_PARTITIONS . T:O1=0 O1=1 . . 0 A1:1,A2:2,A3:3 A1:P1,A2:P1,A3:P1"
- "$SETUP_A123_PARTITIONS . . T:O2=0 O2=1 . 0 A1:1,A2:2,A3:3 A1:P1,A2:P1,A3:P1"
- "$SETUP_A123_PARTITIONS . . . T:O3=0 O3=1 0 A1:1,A2:2,A3:3 A1:P1,A2:P1,A3:P1"
- "$SETUP_A123_PARTITIONS . T:O1=0 O2=0 O1=1 . 0 A1:1,A2:,A3:3 A1:P1,A2:P1,A3:P1"
- "$SETUP_A123_PARTITIONS . T:O1=0 O2=0 O2=1 . 0 A1:2-3,A2:2-3,A3:3 A1:P1,A2:P-1,A3:P-1"
+ " C0-1 . . C2-3 S+ C4-5 . O2=0 0 A1:0-1|B1:3"
+ "C0-3:P1:S+ C2-3:P1 . . O2=0 . . . 0 A1:0-1|A2:3"
+ "C0-3:P1:S+ C2-3:P1 . . O2=0 O2=1 . . 0 A1:0-1|A2:2-3"
+ "C0-3:P1:S+ C2-3:P1 . . O1=0 . . . 0 A1:0|A2:2-3"
+ "C0-3:P1:S+ C2-3:P1 . . O1=0 O1=1 . . 0 A1:0-1|A2:2-3"
+ "C2-3:P1:S+ C3:P1 . . O3=0 O3=1 . . 0 A1:2|A2:3 A1:P1|A2:P1"
+ "C2-3:P1:S+ C3:P2 . . O3=0 O3=1 . . 0 A1:2|A2:3 A1:P1|A2:P2"
+ "C2-3:P1:S+ C3:P1 . . O2=0 O2=1 . . 0 A1:2|A2:3 A1:P1|A2:P1"
+ "C2-3:P1:S+ C3:P2 . . O2=0 O2=1 . . 0 A1:2|A2:3 A1:P1|A2:P2"
+ "C2-3:P1:S+ C3:P1 . . O2=0 . . . 0 A1:|A2:3 A1:P1|A2:P1"
+ "C2-3:P1:S+ C3:P1 . . O3=0 . . . 0 A1:2|A2: A1:P1|A2:P1"
+ "C2-3:P1:S+ C3:P1 . . T:O2=0 . . . 0 A1:3|A2:3 A1:P1|A2:P-1"
+ "C2-3:P1:S+ C3:P1 . . . T:O3=0 . . 0 A1:2|A2:2 A1:P1|A2:P-1"
+ "$SETUP_A123_PARTITIONS . O1=0 . . . 0 A1:|A2:2|A3:3 A1:P1|A2:P1|A3:P1"
+ "$SETUP_A123_PARTITIONS . O2=0 . . . 0 A1:1|A2:|A3:3 A1:P1|A2:P1|A3:P1"
+ "$SETUP_A123_PARTITIONS . O3=0 . . . 0 A1:1|A2:2|A3: A1:P1|A2:P1|A3:P1"
+ "$SETUP_A123_PARTITIONS . T:O1=0 . . . 0 A1:2-3|A2:2-3|A3:3 A1:P1|A2:P-1|A3:P-1"
+ "$SETUP_A123_PARTITIONS . . T:O2=0 . . 0 A1:1|A2:3|A3:3 A1:P1|A2:P1|A3:P-1"
+ "$SETUP_A123_PARTITIONS . . . T:O3=0 . 0 A1:1|A2:2|A3:2 A1:P1|A2:P1|A3:P-1"
+ "$SETUP_A123_PARTITIONS . T:O1=0 O1=1 . . 0 A1:1|A2:2|A3:3 A1:P1|A2:P1|A3:P1"
+ "$SETUP_A123_PARTITIONS . . T:O2=0 O2=1 . 0 A1:1|A2:2|A3:3 A1:P1|A2:P1|A3:P1"
+ "$SETUP_A123_PARTITIONS . . . T:O3=0 O3=1 0 A1:1|A2:2|A3:3 A1:P1|A2:P1|A3:P1"
+ "$SETUP_A123_PARTITIONS . T:O1=0 O2=0 O1=1 . 0 A1:1|A2:|A3:3 A1:P1|A2:P1|A3:P1"
+ "$SETUP_A123_PARTITIONS . T:O1=0 O2=0 O2=1 . 0 A1:2-3|A2:2-3|A3:3 A1:P1|A2:P-1|A3:P-1"
# old-A1 old-A2 old-A3 old-B1 new-A1 new-A2 new-A3 new-B1 fail ECPUs Pstate ISOLCPUS
# ------ ------ ------ ------ ------ ------ ------ ------ ---- ----- ------ --------
#
# Remote partition and cpuset.cpus.exclusive tests
#
- " C0-3:S+ C1-3:S+ C2-3 . X2-3 . . . 0 A1:0-3,A2:1-3,A3:2-3,XA1:2-3"
- " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3:P2 . . 0 A1:0-1,A2:2-3,A3:2-3 A1:P0,A2:P2 2-3"
- " C0-3:S+ C1-3:S+ C2-3 . X2-3 X3:P2 . . 0 A1:0-2,A2:3,A3:3 A1:P0,A2:P2 3"
- " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2 . 0 A1:0-1,A2:1,A3:2-3 A1:P0,A3:P2 2-3"
- " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:C3 . 0 A1:0-1,A2:1,A3:2-3 A1:P0,A3:P2 2-3"
- " C0-3:S+ C1-3:S+ C2-3 C2-3 . . . P2 0 A1:0-3,A2:1-3,A3:2-3,B1:2-3 A1:P0,A3:P0,B1:P-2"
+ " C0-3:S+ C1-3:S+ C2-3 . X2-3 . . . 0 A1:0-3|A2:1-3|A3:2-3|XA1:2-3"
+ " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3:P2 . . 0 A1:0-1|A2:2-3|A3:2-3 A1:P0|A2:P2 2-3"
+ " C0-3:S+ C1-3:S+ C2-3 . X2-3 X3:P2 . . 0 A1:0-2|A2:3|A3:3 A1:P0|A2:P2 3"
+ " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2 . 0 A1:0-1|A2:1|A3:2-3 A1:P0|A3:P2 2-3"
+ " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:C3 . 0 A1:0-1|A2:1|A3:2-3 A1:P0|A3:P2 2-3"
+ " C0-3:S+ C1-3:S+ C2-3 C2-3 . . . P2 0 A1:0-3|A2:1-3|A3:2-3|B1:2-3 A1:P0|A3:P0|B1:P-2"
" C0-3:S+ C1-3:S+ C2-3 C4-5 . . . P2 0 B1:4-5 B1:P2 4-5"
- " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2 P2 0 A3:2-3,B1:4 A3:P2,B1:P2 2-4"
- " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2:C1-3 P2 0 A3:2-3,B1:4 A3:P2,B1:P2 2-4"
- " C0-3:S+ C1-3:S+ C2-3 C4 X1-3 X1-3:P2 P2 . 0 A2:1,A3:2-3 A2:P2,A3:P2 1-3"
- " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2 P2:C4-5 0 A3:2-3,B1:4-5 A3:P2,B1:P2 2-5"
- " C4:X0-3:S+ X1-3:S+ X2-3 . . P2 . . 0 A1:4,A2:1-3,A3:1-3 A2:P2 1-3"
- " C4:X0-3:S+ X1-3:S+ X2-3 . . . P2 . 0 A1:4,A2:4,A3:2-3 A3:P2 2-3"
+ " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2 P2 0 A3:2-3|B1:4 A3:P2|B1:P2 2-4"
+ " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2:C1-3 P2 0 A3:2-3|B1:4 A3:P2|B1:P2 2-4"
+ " C0-3:S+ C1-3:S+ C2-3 C4 X1-3 X1-3:P2 P2 . 0 A2:1|A3:2-3 A2:P2|A3:P2 1-3"
+ " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2 P2:C4-5 0 A3:2-3|B1:4-5 A3:P2|B1:P2 2-5"
+ " C4:X0-3:S+ X1-3:S+ X2-3 . . P2 . . 0 A1:4|A2:1-3|A3:1-3 A2:P2 1-3"
+ " C4:X0-3:S+ X1-3:S+ X2-3 . . . P2 . 0 A1:4|A2:4|A3:2-3 A3:P2 2-3"
# Nested remote/local partition tests
- " C0-3:S+ C1-3:S+ C2-3 C4-5 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:,A3:2-3,B1:4-5 \
- A1:P0,A2:P1,A3:P2,B1:P1 2-3"
- " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:,A3:2-3,B1:4 \
- A1:P0,A2:P1,A3:P2,B1:P1 2-4,2-3"
- " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3:P1 . P1 0 A1:0-1,A2:2-3,A3:2-3,B1:4 \
- A1:P0,A2:P1,A3:P0,B1:P1"
- " C0-3:S+ C1-3:S+ C3 C4 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:2,A3:3,B1:4 \
- A1:P0,A2:P1,A3:P2,B1:P1 2-4,3"
- " C0-4:S+ C1-4:S+ C2-4 . X2-4 X2-4:P2 X4:P1 . 0 A1:0-1,A2:2-3,A3:4 \
- A1:P0,A2:P2,A3:P1 2-4,2-3"
- " C0-4:S+ C1-4:S+ C2-4 . X2-4 X2-4:P2 X3-4:P1 . 0 A1:0-1,A2:2,A3:3-4 \
- A1:P0,A2:P2,A3:P1 2"
+ " C0-3:S+ C1-3:S+ C2-3 C4-5 X2-3 X2-3:P1 P2 P1 0 A1:0-1|A2:|A3:2-3|B1:4-5 \
+ A1:P0|A2:P1|A3:P2|B1:P1 2-3"
+ " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3:P1 P2 P1 0 A1:0-1|A2:|A3:2-3|B1:4 \
+ A1:P0|A2:P1|A3:P2|B1:P1 2-4|2-3"
+ " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3:P1 . P1 0 A1:0-1|A2:2-3|A3:2-3|B1:4 \
+ A1:P0|A2:P1|A3:P0|B1:P1"
+ " C0-3:S+ C1-3:S+ C3 C4 X2-3 X2-3:P1 P2 P1 0 A1:0-1|A2:2|A3:3|B1:4 \
+ A1:P0|A2:P1|A3:P2|B1:P1 2-4|3"
+ " C0-4:S+ C1-4:S+ C2-4 . X2-4 X2-4:P2 X4:P1 . 0 A1:0-1|A2:2-3|A3:4 \
+ A1:P0|A2:P2|A3:P1 2-4|2-3"
+ " C0-4:S+ C1-4:S+ C2-4 . X2-4 X2-4:P2 X3-4:P1 . 0 A1:0-1|A2:2|A3:3-4 \
+ A1:P0|A2:P2|A3:P1 2"
" C0-4:X2-4:S+ C1-4:X2-4:S+:P2 C2-4:X4:P1 \
- . . X5 . . 0 A1:0-4,A2:1-4,A3:2-4 \
- A1:P0,A2:P-2,A3:P-1"
+ . . X5 . . 0 A1:0-4|A2:1-4|A3:2-4 \
+ A1:P0|A2:P-2|A3:P-1 ."
" C0-4:X2-4:S+ C1-4:X2-4:S+:P2 C2-4:X4:P1 \
- . . . X1 . 0 A1:0-1,A2:2-4,A3:2-4 \
- A1:P0,A2:P2,A3:P-1 2-4"
+ . . . X1 . 0 A1:0-1|A2:2-4|A3:2-4 \
+ A1:P0|A2:P2|A3:P-1 2-4"
# Remote partition offline tests
- " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:O2=0 . 0 A1:0-1,A2:1,A3:3 A1:P0,A3:P2 2-3"
- " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:O2=0 O2=1 0 A1:0-1,A2:1,A3:2-3 A1:P0,A3:P2 2-3"
- " C0-3:S+ C1-3:S+ C3 . X2-3 X2-3 P2:O3=0 . 0 A1:0-2,A2:1-2,A3: A1:P0,A3:P2 3"
- " C0-3:S+ C1-3:S+ C3 . X2-3 X2-3 T:P2:O3=0 . 0 A1:0-2,A2:1-2,A3:1-2 A1:P0,A3:P-2 3,"
+ " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:O2=0 . 0 A1:0-1|A2:1|A3:3 A1:P0|A3:P2 2-3"
+ " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:O2=0 O2=1 0 A1:0-1|A2:1|A3:2-3 A1:P0|A3:P2 2-3"
+ " C0-3:S+ C1-3:S+ C3 . X2-3 X2-3 P2:O3=0 . 0 A1:0-2|A2:1-2|A3: A1:P0|A3:P2 3"
+ " C0-3:S+ C1-3:S+ C3 . X2-3 X2-3 T:P2:O3=0 . 0 A1:0-2|A2:1-2|A3:1-2 A1:P0|A3:P-2 3|"
# An invalidated remote partition cannot self-recover from hotplug
- " C0-3:S+ C1-3:S+ C2 . X2-3 X2-3 T:P2:O2=0 O2=1 0 A1:0-3,A2:1-3,A3:2 A1:P0,A3:P-2"
+ " C0-3:S+ C1-3:S+ C2 . X2-3 X2-3 T:P2:O2=0 O2=1 0 A1:0-3|A2:1-3|A3:2 A1:P0|A3:P-2 ."
# cpus.exclusive.effective clearing test
- " C0-3:S+ C1-3:S+ C2 . X2-3:X . . . 0 A1:0-3,A2:1-3,A3:2,XA1:"
+ " C0-3:S+ C1-3:S+ C2 . X2-3:X . . . 0 A1:0-3|A2:1-3|A3:2|XA1:"
# Invalid to valid remote partition transition test
- " C0-3:S+ C1-3 . . . X3:P2 . . 0 A1:0-3,A2:1-3,XA2: A2:P-2"
+ " C0-3:S+ C1-3 . . . X3:P2 . . 0 A1:0-3|A2:1-3|XA2: A2:P-2 ."
" C0-3:S+ C1-3:X3:P2
- . . X2-3 P2 . . 0 A1:0-2,A2:3,XA2:3 A2:P2 3"
+ . . X2-3 P2 . . 0 A1:0-2|A2:3|XA2:3 A2:P2 3"
# Invalid to valid local partition direct transition tests
- " C1-3:S+:P2 X4:P2 . . . . . . 0 A1:1-3,XA1:1-3,A2:1-3:XA2: A1:P2,A2:P-2 1-3"
- " C1-3:S+:P2 X4:P2 . . . X3:P2 . . 0 A1:1-2,XA1:1-3,A2:3:XA2:3 A1:P2,A2:P2 1-3"
- " C0-3:P2 . . C4-6 C0-4 . . . 0 A1:0-4,B1:4-6 A1:P-2,B1:P0"
- " C0-3:P2 . . C4-6 C0-4:C0-3 . . . 0 A1:0-3,B1:4-6 A1:P2,B1:P0 0-3"
- " C0-3:P2 . . C3-5:C4-5 . . . . 0 A1:0-3,B1:4-5 A1:P2,B1:P0 0-3"
+ " C1-3:S+:P2 X4:P2 . . . . . . 0 A1:1-3|XA1:1-3|A2:1-3:XA2: A1:P2|A2:P-2 1-3"
+ " C1-3:S+:P2 X4:P2 . . . X3:P2 . . 0 A1:1-2|XA1:1-3|A2:3:XA2:3 A1:P2|A2:P2 1-3"
+ " C0-3:P2 . . C4-6 C0-4 . . . 0 A1:0-4|B1:4-6 A1:P-2|B1:P0"
+ " C0-3:P2 . . C4-6 C0-4:C0-3 . . . 0 A1:0-3|B1:4-6 A1:P2|B1:P0 0-3"
# Local partition invalidation tests
" C0-3:X1-3:S+:P2 C1-3:X2-3:S+:P2 C2-3:X3:P2 \
- . . . . . 0 A1:1,A2:2,A3:3 A1:P2,A2:P2,A3:P2 1-3"
+ . . . . . 0 A1:1|A2:2|A3:3 A1:P2|A2:P2|A3:P2 1-3"
" C0-3:X1-3:S+:P2 C1-3:X2-3:S+:P2 C2-3:X3:P2 \
- . . X4 . . 0 A1:1-3,A2:1-3,A3:2-3,XA2:,XA3: A1:P2,A2:P-2,A3:P-2 1-3"
+ . . X4 . . 0 A1:1-3|A2:1-3|A3:2-3|XA2:|XA3: A1:P2|A2:P-2|A3:P-2 1-3"
" C0-3:X1-3:S+:P2 C1-3:X2-3:S+:P2 C2-3:X3:P2 \
- . . C4:X . . 0 A1:1-3,A2:1-3,A3:2-3,XA2:,XA3: A1:P2,A2:P-2,A3:P-2 1-3"
+ . . C4:X . . 0 A1:1-3|A2:1-3|A3:2-3|XA2:|XA3: A1:P2|A2:P-2|A3:P-2 1-3"
# Local partition CPU change tests
- " C0-5:S+:P2 C4-5:S+:P1 . . . C3-5 . . 0 A1:0-2,A2:3-5 A1:P2,A2:P1 0-2"
- " C0-5:S+:P2 C4-5:S+:P1 . . C1-5 . . . 0 A1:1-3,A2:4-5 A1:P2,A2:P1 1-3"
+ " C0-5:S+:P2 C4-5:S+:P1 . . . C3-5 . . 0 A1:0-2|A2:3-5 A1:P2|A2:P1 0-2"
+ " C0-5:S+:P2 C4-5:S+:P1 . . C1-5 . . . 0 A1:1-3|A2:4-5 A1:P2|A2:P1 1-3"
# cpus_allowed/exclusive_cpus update tests
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
- . X:C4 . P2 . 0 A1:4,A2:4,XA2:,XA3:,A3:4 \
- A1:P0,A3:P-2"
+ . X:C4 . P2 . 0 A1:4|A2:4|XA2:|XA3:|A3:4 \
+ A1:P0|A3:P-2 ."
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
- . X1 . P2 . 0 A1:0-3,A2:1-3,XA1:1,XA2:,XA3:,A3:2-3 \
- A1:P0,A3:P-2"
+ . X1 . P2 . 0 A1:0-3|A2:1-3|XA1:1|XA2:|XA3:|A3:2-3 \
+ A1:P0|A3:P-2 ."
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
- . . X3 P2 . 0 A1:0-2,A2:1-2,XA2:3,XA3:3,A3:3 \
- A1:P0,A3:P2 3"
+ . . X3 P2 . 0 A1:0-2|A2:1-2|XA2:3|XA3:3|A3:3 \
+ A1:P0|A3:P2 3"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
- . . X3 . . 0 A1:0-3,A2:1-3,XA2:3,XA3:3,A3:2-3 \
- A1:P0,A3:P-2"
+				. . X3 . . 0 A1:0-2|A2:1-2|XA2:3|XA3:3|A3:3 \
+ A1:P0|A3:P2 3"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
- . X4 . . . 0 A1:0-3,A2:1-3,A3:2-3,XA1:4,XA2:,XA3 \
- A1:P0,A3:P-2"
+ . X4 . . . 0 A1:0-3|A2:1-3|A3:2-3|XA1:4|XA2:|XA3 \
+ A1:P0|A3:P-2"
# old-A1 old-A2 old-A3 old-B1 new-A1 new-A2 new-A3 new-B1 fail ECPUs Pstate ISOLCPUS
# ------ ------ ------ ------ ------ ------ ------ ------ ---- ----- ------ --------
@@ -339,68 +356,127 @@ TEST_MATRIX=(
#
# Adding CPUs to partition root that are not in parent's
# cpuset.cpus is allowed, but those extra CPUs are ignored.
- "C2-3:P1:S+ C3:P1 . . . C2-4 . . 0 A1:,A2:2-3 A1:P1,A2:P1"
+ "C2-3:P1:S+ C3:P1 . . . C2-4 . . 0 A1:|A2:2-3 A1:P1|A2:P1"
# Taking away all CPUs from parent or itself if there are tasks
# will make the partition invalid.
- "C2-3:P1:S+ C3:P1 . . T C2-3 . . 0 A1:2-3,A2:2-3 A1:P1,A2:P-1"
- " C3:P1:S+ C3 . . T P1 . . 0 A1:3,A2:3 A1:P1,A2:P-1"
- "$SETUP_A123_PARTITIONS . T:C2-3 . . . 0 A1:2-3,A2:2-3,A3:3 A1:P1,A2:P-1,A3:P-1"
- "$SETUP_A123_PARTITIONS . T:C2-3:C1-3 . . . 0 A1:1,A2:2,A3:3 A1:P1,A2:P1,A3:P1"
+ "C2-3:P1:S+ C3:P1 . . T C2-3 . . 0 A1:2-3|A2:2-3 A1:P1|A2:P-1"
+ " C3:P1:S+ C3 . . T P1 . . 0 A1:3|A2:3 A1:P1|A2:P-1"
+ "$SETUP_A123_PARTITIONS . T:C2-3 . . . 0 A1:2-3|A2:2-3|A3:3 A1:P1|A2:P-1|A3:P-1"
+ "$SETUP_A123_PARTITIONS . T:C2-3:C1-3 . . . 0 A1:1|A2:2|A3:3 A1:P1|A2:P1|A3:P1"
# Changing a partition root to member makes child partitions invalid
- "C2-3:P1:S+ C3:P1 . . P0 . . . 0 A1:2-3,A2:3 A1:P0,A2:P-1"
- "$SETUP_A123_PARTITIONS . C2-3 P0 . . 0 A1:2-3,A2:2-3,A3:3 A1:P1,A2:P0,A3:P-1"
+ "C2-3:P1:S+ C3:P1 . . P0 . . . 0 A1:2-3|A2:3 A1:P0|A2:P-1"
+ "$SETUP_A123_PARTITIONS . C2-3 P0 . . 0 A1:2-3|A2:2-3|A3:3 A1:P1|A2:P0|A3:P-1"
	# cpuset.cpus can contain CPUs not in parent's cpuset.cpus as long
# as they overlap.
- "C2-3:P1:S+ . . . . C3-4:P1 . . 0 A1:2,A2:3 A1:P1,A2:P1"
+ "C2-3:P1:S+ . . . . C3-4:P1 . . 0 A1:2|A2:3 A1:P1|A2:P1"
# Deletion of CPUs distributed to child cgroup is allowed.
- "C0-1:P1:S+ C1 . C2-3 C4-5 . . . 0 A1:4-5,A2:4-5"
+ "C0-1:P1:S+ C1 . C2-3 C4-5 . . . 0 A1:4-5|A2:4-5"
# To become a valid partition root, cpuset.cpus must overlap parent's
# cpuset.cpus.
- " C0-1:P1 . . C2-3 S+ C4-5:P1 . . 0 A1:0-1,A2:0-1 A1:P1,A2:P-1"
+ " C0-1:P1 . . C2-3 S+ C4-5:P1 . . 0 A1:0-1|A2:0-1 A1:P1|A2:P-1"
# Enabling partition with child cpusets is allowed
- " C0-1:S+ C1 . C2-3 P1 . . . 0 A1:0-1,A2:1 A1:P1"
+ " C0-1:S+ C1 . C2-3 P1 . . . 0 A1:0-1|A2:1 A1:P1"
	# A partition root with non-partition root parent is invalid, but it
# can be made valid if its parent becomes a partition root too.
- " C0-1:S+ C1 . C2-3 . P2 . . 0 A1:0-1,A2:1 A1:P0,A2:P-2"
- " C0-1:S+ C1:P2 . C2-3 P1 . . . 0 A1:0,A2:1 A1:P1,A2:P2"
+ " C0-1:S+ C1 . C2-3 . P2 . . 0 A1:0-1|A2:1 A1:P0|A2:P-2"
+ " C0-1:S+ C1:P2 . C2-3 P1 . . . 0 A1:0|A2:1 A1:P1|A2:P2 0-1|1"
# A non-exclusive cpuset.cpus change will invalidate partition and its siblings
- " C0-1:P1 . . C2-3 C0-2 . . . 0 A1:0-2,B1:2-3 A1:P-1,B1:P0"
- " C0-1:P1 . . P1:C2-3 C0-2 . . . 0 A1:0-2,B1:2-3 A1:P-1,B1:P-1"
- " C0-1 . . P1:C2-3 C0-2 . . . 0 A1:0-2,B1:2-3 A1:P0,B1:P-1"
+ " C0-1:P1 . . C2-3 C0-2 . . . 0 A1:0-2|B1:2-3 A1:P-1|B1:P0"
+ " C0-1:P1 . . P1:C2-3 C0-2 . . . 0 A1:0-2|B1:2-3 A1:P-1|B1:P-1"
+ " C0-1 . . P1:C2-3 C0-2 . . . 0 A1:0-2|B1:2-3 A1:P0|B1:P-1"
# cpuset.cpus can overlap with sibling cpuset.cpus.exclusive but not subsumed by it
- " C0-3 . . C4-5 X5 . . . 0 A1:0-3,B1:4-5"
+ " C0-3 . . C4-5 X5 . . . 0 A1:0-3|B1:4-5"
	# Child partition roots that try to take all CPUs from parent partition
# with tasks will remain invalid.
- " C1-4:P1:S+ P1 . . . . . . 0 A1:1-4,A2:1-4 A1:P1,A2:P-1"
- " C1-4:P1:S+ P1 . . . C1-4 . . 0 A1,A2:1-4 A1:P1,A2:P1"
- " C1-4:P1:S+ P1 . . T C1-4 . . 0 A1:1-4,A2:1-4 A1:P1,A2:P-1"
+ " C1-4:P1:S+ P1 . . . . . . 0 A1:1-4|A2:1-4 A1:P1|A2:P-1"
+ " C1-4:P1:S+ P1 . . . C1-4 . . 0 A1|A2:1-4 A1:P1|A2:P1"
+ " C1-4:P1:S+ P1 . . T C1-4 . . 0 A1:1-4|A2:1-4 A1:P1|A2:P-1"
# Clearing of cpuset.cpus with a preset cpuset.cpus.exclusive shouldn't
# affect cpuset.cpus.exclusive.effective.
- " C1-4:X3:S+ C1:X3 . . . C . . 0 A2:1-4,XA2:3"
+ " C1-4:X3:S+ C1:X3 . . . C . . 0 A2:1-4|XA2:3"
+
+	# cpuset.cpus can contain CPUs that overlap a sibling's cpuset.cpus.exclusive,
+	# but creating a local partition out of them is not allowed. Similarly, any
+	# change in cpuset.cpus of a local partition that overlaps sibling exclusive
+	# CPUs will invalidate it.
+ " CX1-4:S+ CX2-4:P2 . C5-6 . . . P1 0 A1:1|A2:2-4|B1:5-6|XB1:5-6 \
+				A1:P0|A2:P2|B1:P1 2-4"
+ " CX1-4:S+ CX2-4:P2 . C3-6 . . . P1 0 A1:1|A2:2-4|B1:5-6 \
+				A1:P0|A2:P2|B1:P-1 2-4"
+ " CX1-4:S+ CX2-4:P2 . C5-6 . . . P1:C3-6 0 A1:1|A2:2-4|B1:5-6 \
+				A1:P0|A2:P2|B1:P-1 2-4"
# old-A1 old-A2 old-A3 old-B1 new-A1 new-A2 new-A3 new-B1 fail ECPUs Pstate ISOLCPUS
# ------ ------ ------ ------ ------ ------ ------ ------ ---- ----- ------ --------
# Failure cases:
# A task cannot be added to a partition with no cpu
- "C2-3:P1:S+ C3:P1 . . O2=0:T . . . 1 A1:,A2:3 A1:P1,A2:P1"
+ "C2-3:P1:S+ C3:P1 . . O2=0:T . . . 1 A1:|A2:3 A1:P1|A2:P1"
	# Changes to cpuset.cpus.exclusive that violate the exclusivity rule are rejected
- " C0-3 . . C4-5 X0-3 . . X3-5 1 A1:0-3,B1:4-5"
+ " C0-3 . . C4-5 X0-3 . . X3-5 1 A1:0-3|B1:4-5"
# cpuset.cpus cannot be a subset of sibling cpuset.cpus.exclusive
- " C0-3 . . C4-5 X3-5 . . . 1 A1:0-3,B1:4-5"
+ " C0-3 . . C4-5 X3-5 . . . 1 A1:0-3|B1:4-5"
+)
+
+#
+# Cpuset controller remote partition test matrix.
+#
+# Cgroup test hierarchy
+#
+# root
+# |
+# rtest (cpuset.cpus.exclusive=1-7)
+# |
+# +------+------+
+# | |
+# p1 p2
+# +--+--+ +--+--+
+# | | | |
+# c11 c12 c21 c22
+#
+# REMOTE_TEST_MATRIX uses the same notational convention as TEST_MATRIX.
+# Only CPUs 1-7 should be used.
+#
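+# As in TEST_MATRIX, each entry lists the old and new commands applied to the
+# six test cgroups, followed by the expected results. For example, in the
+# first entry below, old-c11 "X1-2" gives c11 exclusive CPUs 1-2 and new-c11
+# "P2" then turns it into an isolated partition, so the expected results
+# include "c11:1-2" and "c11:P2".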
+REMOTE_TEST_MATRIX=(
+ # old-p1 old-p2 old-c11 old-c12 old-c21 old-c22
+ # new-p1 new-p2 new-c11 new-c12 new-c21 new-c22 ECPUs Pstate ISOLCPUS
+ # ------ ------ ------- ------- ------- ------- ----- ------ --------
+ " X1-3:S+ X4-6:S+ X1-2 X3 X4-5 X6 \
+ . . P2 P2 P2 P2 c11:1-2|c12:3|c21:4-5|c22:6 \
+ c11:P2|c12:P2|c21:P2|c22:P2 1-6"
+ " CX1-4:S+ . X1-2:P2 C3 . . \
+ . . . C3-4 . . p1:3-4|c11:1-2|c12:3-4 \
+ p1:P0|c11:P2|c12:P0 1-2"
+ " CX1-4:S+ . X1-2:P2 . . . \
+ X2-4 . . . . . p1:1,3-4|c11:2 \
+ p1:P0|c11:P2 2"
+ " CX1-5:S+ . X1-2:P2 X3-5:P1 . . \
+ X2-4 . . . . . p1:1,5|c11:2|c12:3-4 \
+ p1:P0|c11:P2|c12:P1 2"
+ " CX1-4:S+ . X1-2:P2 X3-4:P1 . . \
+ . . X2 . . . p1:1|c11:2|c12:3-4 \
+ p1:P0|c11:P2|c12:P1 2"
+	# p1 as member will get its effective CPUs from its parent rtest
+ " CX1-4:S+ . X1-2:P2 X3-4:P1 . . \
+ . . X1 CX2-4 . . p1:5-7|c11:1|c12:2-4 \
+ p1:P0|c11:P2|c12:P1 1"
+ " CX1-4:S+ X5-6:P1:S+ . . . . \
+	. . X1-2:P2 X4-5:P1 . X1-7:P2 p1:3|c11:1-2|c12:4|c22:5-6 \
+ p1:P0|p2:P1|c11:P2|c12:P1|c22:P2 \
+ 1-2,4-6|1-2,5-6"
)
#
@@ -453,25 +529,26 @@ set_ctrl_state()
PFILE=$CGRP/cpuset.cpus.partition
CFILE=$CGRP/cpuset.cpus
XFILE=$CGRP/cpuset.cpus.exclusive
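+	# $CMD is one colon-separated token from a test matrix entry; its
+	# leading character(s) select the operation, e.g. "C2-4" writes
+	# cpuset.cpus, "CX2-4" writes both cpuset.cpus and
+	# cpuset.cpus.exclusive, and "P<n>" writes the corresponding keyword
+	# to cpuset.cpus.partition.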
- S=$(expr substr $CMD 1 1)
- if [[ $S = S ]]
- then
- PREFIX=${CMD#?}
+ case $CMD in
+ S*) PREFIX=${CMD#?}
COMM="echo ${PREFIX}${CTRL} > $SFILE"
eval $COMM $REDIRECT
- elif [[ $S = X ]]
- then
+ ;;
+ X*)
CPUS=${CMD#?}
COMM="echo $CPUS > $XFILE"
eval $COMM $REDIRECT
- elif [[ $S = C ]]
- then
- CPUS=${CMD#?}
+ ;;
+ CX*)
+ CPUS=${CMD#??}
+ COMM="echo $CPUS > $CFILE; echo $CPUS > $XFILE"
+ eval $COMM $REDIRECT
+ ;;
+ C*) CPUS=${CMD#?}
COMM="echo $CPUS > $CFILE"
eval $COMM $REDIRECT
- elif [[ $S = P ]]
- then
- VAL=${CMD#?}
+ ;;
+ P*) VAL=${CMD#?}
case $VAL in
0) VAL=member
;;
@@ -486,15 +563,17 @@ set_ctrl_state()
esac
COMM="echo $VAL > $PFILE"
eval $COMM $REDIRECT
- elif [[ $S = O ]]
- then
- VAL=${CMD#?}
+ ;;
+ O*) VAL=${CMD#?}
write_cpu_online $VAL
- elif [[ $S = T ]]
- then
- COMM="echo 0 > $TFILE"
+ ;;
+ T*) COMM="echo 0 > $TFILE"
eval $COMM $REDIRECT
- fi
+ ;;
+ *) echo "Unknown command: $CMD"
+ exit 1
+ ;;
+ esac
RET=$?
[[ $RET -ne 0 ]] && {
[[ -n "$SHOWERR" ]] && {
@@ -532,21 +611,18 @@ online_cpus()
}
#
-# Return 1 if the list of effective cpus isn't the same as the initial list.
+# Remove all the test cgroup directories
#
reset_cgroup_states()
{
echo 0 > $CGROUP2/cgroup.procs
online_cpus
- rmdir A1/A2/A3 A1/A2 A1 B1 > /dev/null 2>&1
- pause 0.02
- set_ctrl_state . R-
- pause 0.01
+ rmdir $RESET_LIST > /dev/null 2>&1
}
dump_states()
{
- for DIR in . A1 A1/A2 A1/A2/A3 B1
+ for DIR in $CGROUP_LIST
do
CPUS=$DIR/cpuset.cpus
ECPUS=$DIR/cpuset.cpus.effective
@@ -566,17 +642,33 @@ dump_states()
}
#
+# Set the actual cgroup directory into $CGRP_DIR
+# $1 - cgroup name
+#
+set_cgroup_dir()
+{
+ CGRP_DIR=$1
+ [[ $CGRP_DIR = A2 ]] && CGRP_DIR=A1/A2
+ [[ $CGRP_DIR = A3 ]] && CGRP_DIR=A1/A2/A3
+ [[ $CGRP_DIR = c11 ]] && CGRP_DIR=p1/c11
+ [[ $CGRP_DIR = c12 ]] && CGRP_DIR=p1/c12
+ [[ $CGRP_DIR = c21 ]] && CGRP_DIR=p2/c21
+ [[ $CGRP_DIR = c22 ]] && CGRP_DIR=p2/c22
+}
+
+#
# Check effective cpus
-# $1 - check string, format: <cgroup>:<cpu-list>[,<cgroup>:<cpu-list>]*
+# $1 - check string, format: <cgroup>:<cpu-list>[|<cgroup>:<cpu-list>]*
#
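+# e.g. "A1:0-1|A2:2-3" expects A1's cpuset.cpus.effective to read "0-1" and
+# A1/A2's to read "2-3"; an "X" prefix, as in "XA1:2-3", selects the
+# cpuset.cpus.exclusive.effective file instead.
+#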
check_effective_cpus()
{
CHK_STR=$1
- for CHK in $(echo $CHK_STR | sed -e "s/,/ /g")
+ for CHK in $(echo $CHK_STR | sed -e "s/|/ /g")
do
set -- $(echo $CHK | sed -e "s/:/ /g")
CGRP=$1
- CPUS=$2
+ EXPECTED_CPUS=$2
+ ACTUAL_CPUS=
if [[ $CGRP = X* ]]
then
CGRP=${CGRP#X}
@@ -584,41 +676,39 @@ check_effective_cpus()
else
FILE=cpuset.cpus.effective
fi
- [[ $CGRP = A2 ]] && CGRP=A1/A2
- [[ $CGRP = A3 ]] && CGRP=A1/A2/A3
- [[ -e $CGRP/$FILE ]] || return 1
- [[ $CPUS = $(cat $CGRP/$FILE) ]] || return 1
+ set_cgroup_dir $CGRP
+ [[ -e $CGRP_DIR/$FILE ]] || return 1
+ ACTUAL_CPUS=$(cat $CGRP_DIR/$FILE)
+ [[ $EXPECTED_CPUS = $ACTUAL_CPUS ]] || return 1
done
}
#
# Check cgroup states
-# $1 - check string, format: <cgroup>:<state>[,<cgroup>:<state>]*
+# $1 - check string, format: <cgroup>:<state>[|<cgroup>:<state>]*
#
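+# e.g. "A1:P1|A2:P-1" expects A1 to be a valid partition root and A1/A2 an
+# invalid one; negative values denote an invalid partition in the matrix
+# notation.
+#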
check_cgroup_states()
{
CHK_STR=$1
- for CHK in $(echo $CHK_STR | sed -e "s/,/ /g")
+ for CHK in $(echo $CHK_STR | sed -e "s/|/ /g")
do
set -- $(echo $CHK | sed -e "s/:/ /g")
CGRP=$1
- CGRP_DIR=$CGRP
- STATE=$2
+ EXPECTED_STATE=$2
FILE=
- EVAL=$(expr substr $STATE 2 2)
- [[ $CGRP = A2 ]] && CGRP_DIR=A1/A2
- [[ $CGRP = A3 ]] && CGRP_DIR=A1/A2/A3
+ EVAL=$(expr substr $EXPECTED_STATE 2 2)
- case $STATE in
+ set_cgroup_dir $CGRP
+ case $EXPECTED_STATE in
P*) FILE=$CGRP_DIR/cpuset.cpus.partition
;;
- *) echo "Unknown state: $STATE!"
+ *) echo "Unknown state: $EXPECTED_STATE!"
exit 1
;;
esac
- VAL=$(cat $FILE)
+ ACTUAL_STATE=$(cat $FILE)
- case "$VAL" in
+ case "$ACTUAL_STATE" in
member) VAL=0
;;
root) VAL=1
@@ -642,7 +732,7 @@ check_cgroup_states()
[[ $VAL -eq 1 && $VERBOSE -gt 0 ]] && {
DOMS=$(cat $CGRP_DIR/cpuset.cpus.effective)
[[ -n "$DOMS" ]] &&
- echo " [$CGRP] sched-domain: $DOMS" > $CONSOLE
+ echo " [$CGRP_DIR] sched-domain: $DOMS" > $CONSOLE
}
done
return 0
@@ -665,22 +755,22 @@ check_cgroup_states()
#
check_isolcpus()
{
- EXPECT_VAL=$1
- ISOLCPUS=
+ EXPECTED_ISOLCPUS=$1
+ ISCPUS=${CGROUP2}/cpuset.cpus.isolated
+ ISOLCPUS=$(cat $ISCPUS)
LASTISOLCPU=
SCHED_DOMAINS=/sys/kernel/debug/sched/domains
- ISCPUS=${CGROUP2}/cpuset.cpus.isolated
- if [[ $EXPECT_VAL = . ]]
+ if [[ $EXPECTED_ISOLCPUS = . ]]
then
- EXPECT_VAL=
- EXPECT_VAL2=
- elif [[ $(expr $EXPECT_VAL : ".*,.*") > 0 ]]
+ EXPECTED_ISOLCPUS=
+ EXPECTED_SDOMAIN=
+ elif [[ $(expr $EXPECTED_ISOLCPUS : ".*|.*") > 0 ]]
then
- set -- $(echo $EXPECT_VAL | sed -e "s/,/ /g")
- EXPECT_VAL=$1
- EXPECT_VAL2=$2
+ set -- $(echo $EXPECTED_ISOLCPUS | sed -e "s/|/ /g")
+ EXPECTED_ISOLCPUS=$2
+ EXPECTED_SDOMAIN=$1
else
- EXPECT_VAL2=$EXPECT_VAL
+ EXPECTED_SDOMAIN=$EXPECTED_ISOLCPUS
fi
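+	#
+	# A two-part value such as "2-4|2-3" checks the scheduler domains
+	# against CPUs 2-4 and cpuset.cpus.isolated against CPUs 2-3; a
+	# single value is used for both checks.
+	#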
#
@@ -689,20 +779,21 @@ check_isolcpus()
# to make appending those CPUs easier.
#
[[ -n "$BOOT_ISOLCPUS" ]] && {
- EXPECT_VAL=${EXPECT_VAL:+${EXPECT_VAL},}${BOOT_ISOLCPUS}
- EXPECT_VAL2=${EXPECT_VAL2:+${EXPECT_VAL2},}${BOOT_ISOLCPUS}
+ EXPECTED_ISOLCPUS=${EXPECTED_ISOLCPUS:+${EXPECTED_ISOLCPUS},}${BOOT_ISOLCPUS}
+ EXPECTED_SDOMAIN=${EXPECTED_SDOMAIN:+${EXPECTED_SDOMAIN},}${BOOT_ISOLCPUS}
}
#
# Check cpuset.cpus.isolated cpumask
#
- [[ "$EXPECT_VAL2" != "$ISOLCPUS" ]] && {
+ [[ "$EXPECTED_ISOLCPUS" != "$ISOLCPUS" ]] && {
# Take a 50ms pause and try again
pause 0.05
ISOLCPUS=$(cat $ISCPUS)
}
- [[ "$EXPECT_VAL2" != "$ISOLCPUS" ]] && return 1
+ [[ "$EXPECTED_ISOLCPUS" != "$ISOLCPUS" ]] && return 1
ISOLCPUS=
+ EXPECTED_ISOLCPUS=$EXPECTED_SDOMAIN
#
# Use the sched domain in debugfs to check isolated CPUs, if available
@@ -736,7 +827,7 @@ check_isolcpus()
done
[[ "$ISOLCPUS" = *- ]] && ISOLCPUS=${ISOLCPUS}$LASTISOLCPU
- [[ "$EXPECT_VAL" = "$ISOLCPUS" ]]
+ [[ "$EXPECTED_SDOMAIN" = "$ISOLCPUS" ]]
}
test_fail()
@@ -774,6 +865,63 @@ null_isolcpus_check()
}
#
+# Check state transition test result
+# $1 - Test number
+# $2 - Expected effective CPU values
+# $3 - Expected partition states
+# $4 - Expected isolated CPUs
+#
+check_test_results()
+{
+ _NR=$1
+ _ECPUS="$2"
+ _PSTATES="$3"
+ _ISOLCPUS="$4"
+
+ [[ -n "$_ECPUS" && "$_ECPUS" != . ]] && {
+ check_effective_cpus $_ECPUS
+ [[ $? -ne 0 ]] && test_fail $_NR "effective CPU" \
+ "Cgroup $CGRP: expected $EXPECTED_CPUS, got $ACTUAL_CPUS"
+ }
+
+ [[ -n "$_PSTATES" && "$_PSTATES" != . ]] && {
+ check_cgroup_states $_PSTATES
+ [[ $? -ne 0 ]] && test_fail $_NR states \
+ "Cgroup $CGRP: expected $EXPECTED_STATE, got $ACTUAL_STATE"
+ }
+
+ # Compare the expected isolated CPUs with the actual ones,
+ # if available
+ [[ -n "$_ISOLCPUS" ]] && {
+ check_isolcpus $_ISOLCPUS
+ [[ $? -ne 0 ]] && {
+ [[ -n "$BOOT_ISOLCPUS" ]] && _ISOLCPUS=${_ISOLCPUS},${BOOT_ISOLCPUS}
+ test_fail $_NR "isolated CPU" \
+			"Expected $_ISOLCPUS, got $ISOLCPUS instead"
+ }
+ }
+ reset_cgroup_states
+ #
+ # Check to see if effective cpu list changes
+ #
+ _NEWLIST=$(cat $CGROUP2/cpuset.cpus.effective)
+ RETRY=0
+ while [[ $_NEWLIST != $CPULIST && $RETRY -lt 8 ]]
+ do
+ # Wait a bit longer & recheck a few times
+ pause 0.02
+ ((RETRY++))
+ _NEWLIST=$(cat $CGROUP2/cpuset.cpus.effective)
+ done
+ [[ $_NEWLIST != $CPULIST ]] && {
+ echo "Effective cpus changed to $_NEWLIST after test $_NR!"
+ exit 1
+ }
+ null_isolcpus_check
+	[[ $VERBOSE -gt 0 ]] && echo "Test $_NR done."
+}
+
+#
# Run cpuset state transition test
# $1 - test matrix name
#
@@ -785,6 +933,8 @@ run_state_test()
{
TEST=$1
CONTROLLER=cpuset
+ CGROUP_LIST=". A1 A1/A2 A1/A2/A3 B1"
+ RESET_LIST="A1/A2/A3 A1/A2 A1 B1"
I=0
eval CNT="\${#$TEST[@]}"
@@ -812,10 +962,11 @@ run_state_test()
STATES=${11}
ICPUS=${12}
- set_ctrl_state_noerr B1 $OLD_B1
set_ctrl_state_noerr A1 $OLD_A1
set_ctrl_state_noerr A1/A2 $OLD_A2
set_ctrl_state_noerr A1/A2/A3 $OLD_A3
+ set_ctrl_state_noerr B1 $OLD_B1
+
RETVAL=0
set_ctrl_state A1 $NEW_A1; ((RETVAL += $?))
set_ctrl_state A1/A2 $NEW_A2; ((RETVAL += $?))
@@ -824,47 +975,79 @@ run_state_test()
[[ $RETVAL -ne $RESULT ]] && test_fail $I result
- [[ -n "$ECPUS" && "$ECPUS" != . ]] && {
- check_effective_cpus $ECPUS
- [[ $? -ne 0 ]] && test_fail $I "effective CPU"
- }
+ check_test_results $I "$ECPUS" "$STATES" "$ICPUS"
+ ((I++))
+ done
+ echo "All $I tests of $TEST PASSED."
+}
- [[ -n "$STATES" && "$STATES" != . ]] && {
- check_cgroup_states $STATES
- [[ $? -ne 0 ]] && test_fail $I states
- }
+#
+# Run cpuset remote partition state transition test
+# $1 - test matrix name
+#
+run_remote_state_test()
+{
+ TEST=$1
+ CONTROLLER=cpuset
+ [[ -d rtest ]] || mkdir rtest
+ cd rtest
+ echo +cpuset > cgroup.subtree_control
+ echo "1-7" > cpuset.cpus
+ echo "1-7" > cpuset.cpus.exclusive
+ CGROUP_LIST=".. . p1 p2 p1/c11 p1/c12 p2/c21 p2/c22"
+ RESET_LIST="p1/c11 p1/c12 p2/c21 p2/c22 p1 p2"
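+	# p1/p2 and their children match the hierarchy pictured above
+	# REMOTE_TEST_MATRIX; RESET_LIST removes children before parents.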
+ I=0
+ eval CNT="\${#$TEST[@]}"
- # Compare the expected isolated CPUs with the actual ones,
- # if available
- [[ -n "$ICPUS" ]] && {
- check_isolcpus $ICPUS
- [[ $? -ne 0 ]] && {
- [[ -n "$BOOT_ISOLCPUS" ]] && ICPUS=${ICPUS},${BOOT_ISOLCPUS}
- test_fail $I "isolated CPU" \
- "Expect $ICPUS, get $ISOLCPUS instead"
- }
- }
- reset_cgroup_states
- #
- # Check to see if effective cpu list changes
- #
- NEWLIST=$(cat cpuset.cpus.effective)
- RETRY=0
- while [[ $NEWLIST != $CPULIST && $RETRY -lt 8 ]]
- do
- # Wait a bit longer & recheck a few times
- pause 0.02
- ((RETRY++))
- NEWLIST=$(cat cpuset.cpus.effective)
- done
- [[ $NEWLIST != $CPULIST ]] && {
- echo "Effective cpus changed to $NEWLIST after test $I!"
- exit 1
+ reset_cgroup_states
+ console_msg "Running remote partition state transition test ..."
+
+ while [[ $I -lt $CNT ]]
+ do
+ echo "Running test $I ..." > $CONSOLE
+ [[ $VERBOSE -gt 1 ]] && {
+ echo ""
+ eval echo \${$TEST[$I]}
}
- null_isolcpus_check
- [[ $VERBOSE -gt 0 ]] && echo "Test $I done."
+ eval set -- "\${$TEST[$I]}"
+ OLD_p1=$1
+ OLD_p2=$2
+ OLD_c11=$3
+ OLD_c12=$4
+ OLD_c21=$5
+ OLD_c22=$6
+ NEW_p1=$7
+ NEW_p2=$8
+ NEW_c11=$9
+ NEW_c12=${10}
+ NEW_c21=${11}
+ NEW_c22=${12}
+ ECPUS=${13}
+ STATES=${14}
+ ICPUS=${15}
+
+ set_ctrl_state_noerr p1 $OLD_p1
+ set_ctrl_state_noerr p2 $OLD_p2
+ set_ctrl_state_noerr p1/c11 $OLD_c11
+ set_ctrl_state_noerr p1/c12 $OLD_c12
+ set_ctrl_state_noerr p2/c21 $OLD_c21
+ set_ctrl_state_noerr p2/c22 $OLD_c22
+
+ RETVAL=0
+ set_ctrl_state p1 $NEW_p1 ; ((RETVAL += $?))
+ set_ctrl_state p2 $NEW_p2 ; ((RETVAL += $?))
+ set_ctrl_state p1/c11 $NEW_c11; ((RETVAL += $?))
+ set_ctrl_state p1/c12 $NEW_c12; ((RETVAL += $?))
+ set_ctrl_state p2/c21 $NEW_c21; ((RETVAL += $?))
+ set_ctrl_state p2/c22 $NEW_c22; ((RETVAL += $?))
+
+ [[ $RETVAL -ne 0 ]] && test_fail $I result
+
+ check_test_results $I "$ECPUS" "$STATES" "$ICPUS"
((I++))
done
+ cd ..
+ rmdir rtest
echo "All $I tests of $TEST PASSED."
}
@@ -932,6 +1115,7 @@ test_isolated()
echo $$ > $CGROUP2/cgroup.procs
[[ -d A1 ]] && rmdir A1
null_isolcpus_check
+ pause 0.05
}
#
@@ -997,10 +1181,13 @@ test_inotify()
else
echo "Inotify test PASSED"
fi
+ echo member > cpuset.cpus.partition
+ echo "" > cpuset.cpus
}
trap cleanup 0 2 3 6
run_state_test TEST_MATRIX
+run_remote_state_test REMOTE_TEST_MATRIX
test_isolated
test_inotify
echo "All tests PASSED."
diff --git a/tools/testing/selftests/drivers/net/hds.py b/tools/testing/selftests/drivers/net/hds.py
index 8b7f6acad15f..7c90a040ce45 100755
--- a/tools/testing/selftests/drivers/net/hds.py
+++ b/tools/testing/selftests/drivers/net/hds.py
@@ -6,7 +6,7 @@ import os
from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_raises, KsftSkipEx
from lib.py import CmdExitFailure, EthtoolFamily, NlError
from lib.py import NetDrvEnv
-from lib.py import defer, ethtool, ip
+from lib.py import defer, ethtool, ip, random
def _get_hds_mode(cfg, netnl) -> str:
@@ -109,6 +109,36 @@ def set_hds_thresh_zero(cfg, netnl) -> None:
ksft_eq(0, rings['hds-thresh'])
+def set_hds_thresh_random(cfg, netnl) -> None:
+ try:
+ rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
+ except NlError as e:
+ raise KsftSkipEx('ring-get not supported by device')
+ if 'hds-thresh' not in rings:
+ raise KsftSkipEx('hds-thresh not supported by device')
+ if 'hds-thresh-max' not in rings:
+ raise KsftSkipEx('hds-thresh-max not defined by device')
+
+ if rings['hds-thresh-max'] < 2:
+ raise KsftSkipEx('hds-thresh-max is too small')
+ elif rings['hds-thresh-max'] == 2:
+ hds_thresh = 1
+ else:
+ while True:
+ hds_thresh = random.randint(1, rings['hds-thresh-max'] - 1)
+ if hds_thresh != rings['hds-thresh']:
+ break
+
+ try:
+ netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'hds-thresh': hds_thresh})
+ except NlError as e:
+ if e.error == errno.EINVAL:
+ raise KsftSkipEx("hds-thresh-set not supported by the device")
+ elif e.error == errno.EOPNOTSUPP:
+ raise KsftSkipEx("ring-set not supported by the device")
+ rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
+ ksft_eq(hds_thresh, rings['hds-thresh'])
+
def set_hds_thresh_max(cfg, netnl) -> None:
try:
rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
@@ -243,6 +273,7 @@ def main() -> None:
get_hds_thresh,
set_hds_disable,
set_hds_enable,
+ set_hds_thresh_random,
set_hds_thresh_zero,
set_hds_thresh_max,
set_hds_thresh_gt,
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi-filter.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi-filter.tc
new file mode 100644
index 000000000000..b6d6a312ead5
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi-filter.tc
@@ -0,0 +1,177 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: ftrace - function graph filters
+# requires: set_ftrace_filter function_graph:tracer
+
+# Make sure that function graph filtering works
+
+INSTANCE1="instances/test1_$$"
+INSTANCE2="instances/test2_$$"
+
+WD=`pwd`
+
+do_reset() {
+ cd $WD
+ if [ -d $INSTANCE1 ]; then
+ echo nop > $INSTANCE1/current_tracer
+ rmdir $INSTANCE1
+ fi
+ if [ -d $INSTANCE2 ]; then
+ echo nop > $INSTANCE2/current_tracer
+ rmdir $INSTANCE2
+ fi
+}
+
+mkdir $INSTANCE1
+if ! grep -q function_graph $INSTANCE1/available_tracers; then
+ echo "function_graph not allowed with instances"
+ rmdir $INSTANCE1
+ exit_unsupported
+fi
+
+mkdir $INSTANCE2
+
+fail() { # msg
+ do_reset
+ echo $1
+ exit_fail
+}
+
+disable_tracing
+clear_trace
+
+function_count() {
+ search=$1
+ vsearch=$2
+
+ if [ -z "$search" ]; then
+ cat enabled_functions | wc -l
+ elif [ -z "$vsearch" ]; then
+ grep $search enabled_functions | wc -l
+ else
+		grep $search enabled_functions | grep $vsearch | wc -l
+ fi
+}
+
+set_fgraph() {
+ instance=$1
+ filter="$2"
+ notrace="$3"
+
+ echo "$filter" > $instance/set_ftrace_filter
+ echo "$notrace" > $instance/set_ftrace_notrace
+ echo function_graph > $instance/current_tracer
+}
+
+check_functions() {
+ orig_cnt=$1
+ test=$2
+
+ cnt=`function_count $test`
+ if [ $cnt -gt $orig_cnt ]; then
+ fail
+ fi
+}
+
+check_cnt() {
+ orig_cnt=$1
+ search=$2
+ vsearch=$3
+
+ cnt=`function_count $search $vsearch`
+ if [ $cnt -gt $orig_cnt ]; then
+ fail
+ fi
+}
+
+reset_graph() {
+ instance=$1
+ echo nop > $instance/current_tracer
+}
+
+# get any functions that were enabled before the test
+total_cnt=`function_count`
+sched_cnt=`function_count sched`
+lock_cnt=`function_count lock`
+time_cnt=`function_count time`
+clock_cnt=`function_count clock`
+locks_clock_cnt=`function_count locks clock`
+clock_locks_cnt=`function_count clock locks`
+
+# Trace functions with "sched" but not "time"
+set_fgraph $INSTANCE1 '*sched*' '*time*'
+
+# Make sure "time" isn't listed
+check_functions $time_cnt 'time'
+instance1_cnt=`function_count`
+
+# Trace functions with "lock" but not "clock"
+set_fgraph $INSTANCE2 '*lock*' '*clock*'
+instance1_2_cnt=`function_count`
+
+# Turn off the first instance
+reset_graph $INSTANCE1
+
+# The second instance doesn't trace "clock" functions
+check_functions $clock_cnt 'clock'
+instance2_cnt=`function_count`
+
+# Start from a clean slate
+reset_graph $INSTANCE2
+check_functions $total_cnt
+
+# Trace functions with "lock" but not "clock"
+set_fgraph $INSTANCE2 '*lock*' '*clock*'
+
+# This should match the last time instance 2 was by itself
+cnt=`function_count`
+if [ $instance2_cnt -ne $cnt ]; then
+ fail
+fi
+
+# And it should not be tracing "clock" functions
+check_functions $clock_cnt 'clock'
+
+# Trace functions with "sched" but not "time"
+set_fgraph $INSTANCE1 '*sched*' '*time*'
+
+# This should match the last time both instances were enabled
+cnt=`function_count`
+if [ $instance1_2_cnt -ne $cnt ]; then
+ fail
+fi
+
+# Turn off the second instance
+reset_graph $INSTANCE2
+
+# This should match the last time instance 1 was by itself
+cnt=`function_count`
+if [ $instance1_cnt -ne $cnt ]; then
+ fail
+fi
+
+# And it should not be tracing "time" functions
+check_functions $time_cnt 'time'
+
+# Start from a clean slate
+reset_graph $INSTANCE1
+check_functions $total_cnt
+
+# Enable all functions but those that have "locks"
+set_fgraph $INSTANCE1 '' '*locks*'
+
+# Enable all functions but those that have "clock"
+set_fgraph $INSTANCE2 '' '*clock*'
+
+# If a function has "locks" it should not have "clock"
+check_cnt $locks_clock_cnt locks clock
+
+# If a function has "clock" it should not have "locks"
+check_cnt $clock_locks_cnt clock locks
+
+reset_graph $INSTANCE1
+reset_graph $INSTANCE2
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
index 7d7a6a06cdb7..2d8230da9064 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
@@ -98,7 +98,7 @@ int main(int argc, char *argv[])
info("Calling futex_waitv on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
res = futex_waitv(&waitv, 1, 0, &to, CLOCK_MONOTONIC);
if (!res || errno != EWOULDBLOCK) {
- ksft_test_result_pass("futex_waitv returned: %d %s\n",
+ ksft_test_result_fail("futex_waitv returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
ret = RET_FAIL;
diff --git a/tools/testing/selftests/hid/config.common b/tools/testing/selftests/hid/config.common
index 45b5570441ce..b1f40857307d 100644
--- a/tools/testing/selftests/hid/config.common
+++ b/tools/testing/selftests/hid/config.common
@@ -39,7 +39,6 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPUSETS=y
-CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_BLAKE2B=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_CRYPTO_SEQIV=y
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index f773f8f99249..f62b0a5aba35 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -50,8 +50,18 @@ LIBKVM_riscv += lib/riscv/ucall.c
# Non-compiled test targets
TEST_PROGS_x86 += x86/nx_huge_pages_test.sh
+# Compiled test targets valid on all architectures with libkvm support
+TEST_GEN_PROGS_COMMON = demand_paging_test
+TEST_GEN_PROGS_COMMON += dirty_log_test
+TEST_GEN_PROGS_COMMON += guest_print_test
+TEST_GEN_PROGS_COMMON += kvm_binary_stats_test
+TEST_GEN_PROGS_COMMON += kvm_create_max_vcpus
+TEST_GEN_PROGS_COMMON += kvm_page_table_test
+TEST_GEN_PROGS_COMMON += set_memory_region_test
+
# Compiled test targets
-TEST_GEN_PROGS_x86 = x86/cpuid_test
+TEST_GEN_PROGS_x86 = $(TEST_GEN_PROGS_COMMON)
+TEST_GEN_PROGS_x86 += x86/cpuid_test
TEST_GEN_PROGS_x86 += x86/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86 += x86/dirty_log_page_splitting_test
TEST_GEN_PROGS_x86 += x86/feature_msrs_test
@@ -119,27 +129,21 @@ TEST_GEN_PROGS_x86 += x86/triple_fault_event_test
TEST_GEN_PROGS_x86 += x86/recalc_apic_map_test
TEST_GEN_PROGS_x86 += access_tracking_perf_test
TEST_GEN_PROGS_x86 += coalesced_io_test
-TEST_GEN_PROGS_x86 += demand_paging_test
-TEST_GEN_PROGS_x86 += dirty_log_test
TEST_GEN_PROGS_x86 += dirty_log_perf_test
TEST_GEN_PROGS_x86 += guest_memfd_test
-TEST_GEN_PROGS_x86 += guest_print_test
TEST_GEN_PROGS_x86 += hardware_disable_test
-TEST_GEN_PROGS_x86 += kvm_create_max_vcpus
-TEST_GEN_PROGS_x86 += kvm_page_table_test
TEST_GEN_PROGS_x86 += memslot_modification_stress_test
TEST_GEN_PROGS_x86 += memslot_perf_test
TEST_GEN_PROGS_x86 += mmu_stress_test
TEST_GEN_PROGS_x86 += rseq_test
-TEST_GEN_PROGS_x86 += set_memory_region_test
TEST_GEN_PROGS_x86 += steal_time
-TEST_GEN_PROGS_x86 += kvm_binary_stats_test
TEST_GEN_PROGS_x86 += system_counter_offset_test
TEST_GEN_PROGS_x86 += pre_fault_memory_test
# Compiled outputs used by test targets
TEST_GEN_PROGS_EXTENDED_x86 += x86/nx_huge_pages_test
+TEST_GEN_PROGS_arm64 = $(TEST_GEN_PROGS_COMMON)
TEST_GEN_PROGS_arm64 += arm64/aarch32_id_regs
TEST_GEN_PROGS_arm64 += arm64/arch_timer_edge_cases
TEST_GEN_PROGS_arm64 += arm64/debug-exceptions
@@ -158,22 +162,16 @@ TEST_GEN_PROGS_arm64 += arm64/no-vgic-v3
TEST_GEN_PROGS_arm64 += access_tracking_perf_test
TEST_GEN_PROGS_arm64 += arch_timer
TEST_GEN_PROGS_arm64 += coalesced_io_test
-TEST_GEN_PROGS_arm64 += demand_paging_test
-TEST_GEN_PROGS_arm64 += dirty_log_test
TEST_GEN_PROGS_arm64 += dirty_log_perf_test
-TEST_GEN_PROGS_arm64 += guest_print_test
TEST_GEN_PROGS_arm64 += get-reg-list
-TEST_GEN_PROGS_arm64 += kvm_create_max_vcpus
-TEST_GEN_PROGS_arm64 += kvm_page_table_test
TEST_GEN_PROGS_arm64 += memslot_modification_stress_test
TEST_GEN_PROGS_arm64 += memslot_perf_test
TEST_GEN_PROGS_arm64 += mmu_stress_test
TEST_GEN_PROGS_arm64 += rseq_test
-TEST_GEN_PROGS_arm64 += set_memory_region_test
TEST_GEN_PROGS_arm64 += steal_time
-TEST_GEN_PROGS_arm64 += kvm_binary_stats_test
-TEST_GEN_PROGS_s390 = s390/memop
+TEST_GEN_PROGS_s390 = $(TEST_GEN_PROGS_COMMON)
+TEST_GEN_PROGS_s390 += s390/memop
TEST_GEN_PROGS_s390 += s390/resets
TEST_GEN_PROGS_s390 += s390/sync_regs_test
TEST_GEN_PROGS_s390 += s390/tprot
@@ -182,27 +180,14 @@ TEST_GEN_PROGS_s390 += s390/debug_test
TEST_GEN_PROGS_s390 += s390/cpumodel_subfuncs_test
TEST_GEN_PROGS_s390 += s390/shared_zeropage_test
TEST_GEN_PROGS_s390 += s390/ucontrol_test
-TEST_GEN_PROGS_s390 += demand_paging_test
-TEST_GEN_PROGS_s390 += dirty_log_test
-TEST_GEN_PROGS_s390 += guest_print_test
-TEST_GEN_PROGS_s390 += kvm_create_max_vcpus
-TEST_GEN_PROGS_s390 += kvm_page_table_test
TEST_GEN_PROGS_s390 += rseq_test
-TEST_GEN_PROGS_s390 += set_memory_region_test
-TEST_GEN_PROGS_s390 += kvm_binary_stats_test
+TEST_GEN_PROGS_riscv = $(TEST_GEN_PROGS_COMMON)
TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test
TEST_GEN_PROGS_riscv += riscv/ebreak_test
TEST_GEN_PROGS_riscv += arch_timer
TEST_GEN_PROGS_riscv += coalesced_io_test
-TEST_GEN_PROGS_riscv += demand_paging_test
-TEST_GEN_PROGS_riscv += dirty_log_test
TEST_GEN_PROGS_riscv += get-reg-list
-TEST_GEN_PROGS_riscv += guest_print_test
-TEST_GEN_PROGS_riscv += kvm_binary_stats_test
-TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
-TEST_GEN_PROGS_riscv += kvm_page_table_test
-TEST_GEN_PROGS_riscv += set_memory_region_test
TEST_GEN_PROGS_riscv += steal_time
SPLIT_TESTS += arch_timer
diff --git a/tools/testing/selftests/kvm/arm64/page_fault_test.c b/tools/testing/selftests/kvm/arm64/page_fault_test.c
index ec33a8f9c908..dc6559dad9d8 100644
--- a/tools/testing/selftests/kvm/arm64/page_fault_test.c
+++ b/tools/testing/selftests/kvm/arm64/page_fault_test.c
@@ -199,7 +199,7 @@ static bool guest_set_ha(void)
if (hadbs == 0)
return false;
- tcr = read_sysreg(tcr_el1) | TCR_EL1_HA;
+ tcr = read_sysreg(tcr_el1) | TCR_HA;
write_sysreg(tcr, tcr_el1);
isb();
diff --git a/tools/testing/selftests/kvm/include/arm64/processor.h b/tools/testing/selftests/kvm/include/arm64/processor.h
index 1e8d0d531fbd..b0fc0f945766 100644
--- a/tools/testing/selftests/kvm/include/arm64/processor.h
+++ b/tools/testing/selftests/kvm/include/arm64/processor.h
@@ -62,6 +62,67 @@
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
+/* TCR_EL1 specific flags */
+#define TCR_T0SZ_OFFSET 0
+#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET)
+
+#define TCR_IRGN0_SHIFT 8
+#define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_NC (UL(0) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WBWA (UL(1) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WT (UL(2) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WBnWA (UL(3) << TCR_IRGN0_SHIFT)
+
+#define TCR_ORGN0_SHIFT 10
+#define TCR_ORGN0_MASK (UL(3) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_NC (UL(0) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WBWA (UL(1) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WT (UL(2) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WBnWA (UL(3) << TCR_ORGN0_SHIFT)
+
+#define TCR_SH0_SHIFT 12
+#define TCR_SH0_MASK (UL(3) << TCR_SH0_SHIFT)
+#define TCR_SH0_INNER (UL(3) << TCR_SH0_SHIFT)
+
+#define TCR_TG0_SHIFT 14
+#define TCR_TG0_MASK (UL(3) << TCR_TG0_SHIFT)
+#define TCR_TG0_4K (UL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (UL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K (UL(2) << TCR_TG0_SHIFT)
+
+#define TCR_IPS_SHIFT 32
+#define TCR_IPS_MASK (UL(7) << TCR_IPS_SHIFT)
+#define TCR_IPS_52_BITS (UL(6) << TCR_IPS_SHIFT)
+#define TCR_IPS_48_BITS (UL(5) << TCR_IPS_SHIFT)
+#define TCR_IPS_40_BITS (UL(2) << TCR_IPS_SHIFT)
+#define TCR_IPS_36_BITS (UL(1) << TCR_IPS_SHIFT)
+
+#define TCR_HA (UL(1) << 39)
+#define TCR_DS (UL(1) << 59)
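+
+/*
+ * Example: a 48-bit VA, 4K-granule, 48-bit IPA configuration corresponds to
+ * TCR_T0SZ(48) (== 16) | TCR_TG0_4K | TCR_IPS_48_BITS.
+ */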
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PTE_ATTRINDX(t) ((t) << 2)
+#define PTE_ATTRINDX_MASK GENMASK(4, 2)
+#define PTE_ATTRINDX_SHIFT 2
+
+#define PTE_VALID BIT(0)
+#define PGD_TYPE_TABLE BIT(1)
+#define PUD_TYPE_TABLE BIT(1)
+#define PMD_TYPE_TABLE BIT(1)
+#define PTE_TYPE_PAGE BIT(1)
+
+#define PTE_SHARED (UL(3) << 8) /* SH[1:0], inner shareable */
+#define PTE_AF BIT(10)
+
+#define PTE_ADDR_MASK(page_shift) GENMASK(47, (page_shift))
+#define PTE_ADDR_51_48 GENMASK(15, 12)
+#define PTE_ADDR_51_48_SHIFT 12
+#define PTE_ADDR_MASK_LPA2(page_shift) GENMASK(49, (page_shift))
+#define PTE_ADDR_51_50_LPA2 GENMASK(9, 8)
+#define PTE_ADDR_51_50_LPA2_SHIFT 8
+
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
struct kvm_vcpu_init *init, void *guest_code);
@@ -102,12 +163,6 @@ enum {
(v) == VECTOR_SYNC_LOWER_64 || \
(v) == VECTOR_SYNC_LOWER_32)
-/* Access flag */
-#define PTE_AF (1ULL << 10)
-
-/* Access flag update enable/disable */
-#define TCR_EL1_HA (1ULL << 39)
-
void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
uint32_t *ipa16k, uint32_t *ipa64k);
diff --git a/tools/testing/selftests/kvm/lib/arm64/processor.c b/tools/testing/selftests/kvm/lib/arm64/processor.c
index 7ba3aa3755f3..9d69904cb608 100644
--- a/tools/testing/selftests/kvm/lib/arm64/processor.c
+++ b/tools/testing/selftests/kvm/lib/arm64/processor.c
@@ -72,13 +72,13 @@ static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
uint64_t pte;
if (use_lpa2_pte_format(vm)) {
- pte = pa & GENMASK(49, vm->page_shift);
- pte |= FIELD_GET(GENMASK(51, 50), pa) << 8;
- attrs &= ~GENMASK(9, 8);
+ pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift);
+ pte |= FIELD_GET(GENMASK(51, 50), pa) << PTE_ADDR_51_50_LPA2_SHIFT;
+ attrs &= ~PTE_ADDR_51_50_LPA2;
} else {
- pte = pa & GENMASK(47, vm->page_shift);
+ pte = pa & PTE_ADDR_MASK(vm->page_shift);
if (vm->page_shift == 16)
- pte |= FIELD_GET(GENMASK(51, 48), pa) << 12;
+ pte |= FIELD_GET(GENMASK(51, 48), pa) << PTE_ADDR_51_48_SHIFT;
}
pte |= attrs;
@@ -90,12 +90,12 @@ static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
uint64_t pa;
if (use_lpa2_pte_format(vm)) {
- pa = pte & GENMASK(49, vm->page_shift);
- pa |= FIELD_GET(GENMASK(9, 8), pte) << 50;
+ pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift);
+ pa |= FIELD_GET(PTE_ADDR_51_50_LPA2, pte) << 50;
} else {
- pa = pte & GENMASK(47, vm->page_shift);
+ pa = pte & PTE_ADDR_MASK(vm->page_shift);
if (vm->page_shift == 16)
- pa |= FIELD_GET(GENMASK(15, 12), pte) << 48;
+ pa |= FIELD_GET(PTE_ADDR_51_48, pte) << 48;
}
return pa;
@@ -128,7 +128,8 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
uint64_t flags)
{
- uint8_t attr_idx = flags & 7;
+ uint8_t attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
+ uint64_t pg_attr;
uint64_t *ptep;
TEST_ASSERT((vaddr % vm->page_size) == 0,
@@ -147,18 +148,21 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
if (!*ptep)
- *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
+ *ptep = addr_pte(vm, vm_alloc_page_table(vm),
+ PGD_TYPE_TABLE | PTE_VALID);
switch (vm->pgtable_levels) {
case 4:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
if (!*ptep)
- *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
+ *ptep = addr_pte(vm, vm_alloc_page_table(vm),
+ PUD_TYPE_TABLE | PTE_VALID);
/* fall through */
case 3:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
if (!*ptep)
- *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
+ *ptep = addr_pte(vm, vm_alloc_page_table(vm),
+ PMD_TYPE_TABLE | PTE_VALID);
/* fall through */
case 2:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
@@ -167,7 +171,11 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
TEST_FAIL("Page table levels must be 2, 3, or 4");
}
- *ptep = addr_pte(vm, paddr, (attr_idx << 2) | (1 << 10) | 3); /* AF */
+ pg_attr = PTE_AF | PTE_ATTRINDX(attr_idx) | PTE_TYPE_PAGE | PTE_VALID;
+ if (!use_lpa2_pte_format(vm))
+ pg_attr |= PTE_SHARED;
+
+ *ptep = addr_pte(vm, paddr, pg_attr);
}
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
@@ -293,20 +301,20 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
case VM_MODE_P48V48_64K:
case VM_MODE_P40V48_64K:
case VM_MODE_P36V48_64K:
- tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
+ tcr_el1 |= TCR_TG0_64K;
break;
case VM_MODE_P52V48_16K:
case VM_MODE_P48V48_16K:
case VM_MODE_P40V48_16K:
case VM_MODE_P36V48_16K:
case VM_MODE_P36V47_16K:
- tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
+ tcr_el1 |= TCR_TG0_16K;
break;
case VM_MODE_P52V48_4K:
case VM_MODE_P48V48_4K:
case VM_MODE_P40V48_4K:
case VM_MODE_P36V48_4K:
- tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
+ tcr_el1 |= TCR_TG0_4K;
break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
@@ -319,35 +327,35 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
case VM_MODE_P52V48_4K:
case VM_MODE_P52V48_16K:
case VM_MODE_P52V48_64K:
- tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
+ tcr_el1 |= TCR_IPS_52_BITS;
ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2;
break;
case VM_MODE_P48V48_4K:
case VM_MODE_P48V48_16K:
case VM_MODE_P48V48_64K:
- tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
+ tcr_el1 |= TCR_IPS_48_BITS;
break;
case VM_MODE_P40V48_4K:
case VM_MODE_P40V48_16K:
case VM_MODE_P40V48_64K:
- tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
+ tcr_el1 |= TCR_IPS_40_BITS;
break;
case VM_MODE_P36V48_4K:
case VM_MODE_P36V48_16K:
case VM_MODE_P36V48_64K:
case VM_MODE_P36V47_16K:
- tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
+ tcr_el1 |= TCR_IPS_36_BITS;
break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
- sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
- /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */;
- tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
- tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
+ sctlr_el1 |= SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_I;
+
+ tcr_el1 |= TCR_IRGN0_WBWA | TCR_ORGN0_WBWA | TCR_SH0_INNER;
+ tcr_el1 |= TCR_T0SZ(vm->va_bits);
if (use_lpa2_pte_format(vm))
- tcr_el1 |= (1ul << 59) /* DS */;
+ tcr_el1 |= TCR_DS;
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 279ad8946040..815bc45dd8dc 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -2019,9 +2019,8 @@ static struct exit_reason {
KVM_EXIT_STRING(RISCV_SBI),
KVM_EXIT_STRING(RISCV_CSR),
KVM_EXIT_STRING(NOTIFY),
-#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
- KVM_EXIT_STRING(MEMORY_NOT_PRESENT),
-#endif
+ KVM_EXIT_STRING(LOONGARCH_IOCSR),
+ KVM_EXIT_STRING(MEMORY_FAULT),
};
/*
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index e5898678bfab..1375fca80bcd 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -196,25 +196,27 @@ static void calc_min_max_cpu(void)
static void help(const char *name)
{
puts("");
- printf("usage: %s [-h] [-u]\n", name);
+ printf("usage: %s [-h] [-u] [-l latency]\n", name);
printf(" -u: Don't sanity check the number of successful KVM_RUNs\n");
+ printf(" -l: Set /dev/cpu_dma_latency to suppress deep sleep states\n");
puts("");
exit(0);
}
int main(int argc, char *argv[])
{
+ int r, i, snapshot, opt, fd = -1, latency = -1;
bool skip_sanity_check = false;
- int r, i, snapshot;
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
u32 cpu, rseq_cpu;
- int opt;
- while ((opt = getopt(argc, argv, "hu")) != -1) {
+ while ((opt = getopt(argc, argv, "hl:u")) != -1) {
switch (opt) {
case 'u':
 			skip_sanity_check = true;
+			break;
+ case 'l':
+ latency = atoi_paranoid(optarg);
break;
case 'h':
default:
@@ -243,6 +245,20 @@ int main(int argc, char *argv[])
pthread_create(&migration_thread, NULL, migration_worker,
(void *)(unsigned long)syscall(SYS_gettid));
+ if (latency >= 0) {
+ /*
+ * Writes to cpu_dma_latency persist only while the file is
+ * open, i.e. it allows userspace to provide guaranteed latency
+ * while running a workload. Keep the file open until the test
+ * completes, otherwise writing cpu_dma_latency is meaningless.
+ */
+ fd = open("/dev/cpu_dma_latency", O_RDWR);
+ TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("open() /dev/cpu_dma_latency", fd));
+
+ r = write(fd, &latency, 4);
+ TEST_ASSERT(r >= 1, "Error setting /dev/cpu_dma_latency");
+ }
+
for (i = 0; !done; i++) {
vcpu_run(vcpu);
TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
@@ -278,6 +294,9 @@ int main(int argc, char *argv[])
"rseq CPU = %d, sched CPU = %d", rseq_cpu, cpu);
}
+ if (fd > 0)
+ close(fd);
+
/*
* Sanity check that the test was able to enter the guest a reasonable
* number of times, e.g. didn't get stalled too often/long waiting for
@@ -293,8 +312,8 @@ int main(int argc, char *argv[])
TEST_ASSERT(skip_sanity_check || i > (NR_TASK_MIGRATIONS / 2),
"Only performed %d KVM_RUNs, task stalled too much?\n\n"
" Try disabling deep sleep states to reduce CPU wakeup latency,\n"
- " e.g. via cpuidle.off=1 or setting /dev/cpu_dma_latency to '0',\n"
- " or run with -u to disable this sanity check.", i);
+ " e.g. via cpuidle.off=1 or via -l <latency>, or run with -u to\n"
+ " disable this sanity check.", i);
pthread_join(migration_thread, NULL);
diff --git a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c
index 2b550eff35f1..390ae2d87493 100644
--- a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c
+++ b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c
@@ -7,6 +7,7 @@
#include "kvm_util.h"
#include "processor.h"
+#include "kselftest.h"
#define CPUID_MWAIT (1u << 3)
@@ -14,6 +15,8 @@ enum monitor_mwait_testcases {
MWAIT_QUIRK_DISABLED = BIT(0),
MISC_ENABLES_QUIRK_DISABLED = BIT(1),
MWAIT_DISABLED = BIT(2),
+ CPUID_DISABLED = BIT(3),
+ TEST_MAX = CPUID_DISABLED * 2 - 1,
};
/*
@@ -35,11 +38,19 @@ do { \
testcase, vector); \
} while (0)
-static void guest_monitor_wait(int testcase)
+static void guest_monitor_wait(void *arg)
{
+ int testcase = (int) (long) arg;
u8 vector;
- GUEST_SYNC(testcase);
+ u64 val = rdmsr(MSR_IA32_MISC_ENABLE) & ~MSR_IA32_MISC_ENABLE_MWAIT;
+ if (!(testcase & MWAIT_DISABLED))
+ val |= MSR_IA32_MISC_ENABLE_MWAIT;
+ wrmsr(MSR_IA32_MISC_ENABLE, val);
+
+ __GUEST_ASSERT(this_cpu_has(X86_FEATURE_MWAIT) == !(testcase & MWAIT_DISABLED),
+ "Expected CPUID.MWAIT %s\n",
+ (testcase & MWAIT_DISABLED) ? "cleared" : "set");
/*
* Arbitrarily MONITOR this function, SVM performs fault checks before
@@ -50,19 +61,6 @@ static void guest_monitor_wait(int testcase)
vector = kvm_asm_safe("mwait", "a"(guest_monitor_wait), "c"(0), "d"(0));
GUEST_ASSERT_MONITOR_MWAIT("MWAIT", testcase, vector);
-}
-
-static void guest_code(void)
-{
- guest_monitor_wait(MWAIT_DISABLED);
-
- guest_monitor_wait(MWAIT_QUIRK_DISABLED | MWAIT_DISABLED);
-
- guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED | MWAIT_DISABLED);
- guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED);
-
- guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED | MWAIT_QUIRK_DISABLED | MWAIT_DISABLED);
- guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED | MWAIT_QUIRK_DISABLED);
GUEST_DONE();
}
@@ -74,56 +72,64 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
struct ucall uc;
int testcase;
+ char test[80];
- TEST_REQUIRE(this_cpu_has(X86_FEATURE_MWAIT));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_DISABLE_QUIRKS2));
- vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_MWAIT);
+ ksft_print_header();
+ ksft_set_plan(12);
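+	/*
+	 * Walk all 16 combinations of the testcase bits. Combinations where
+	 * CPUID_DISABLED and MWAIT_DISABLED disagree while the MISC_ENABLES
+	 * quirk is left enabled are skipped below, leaving the 12 planned
+	 * tests.
+	 */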
+ for (testcase = 0; testcase <= TEST_MAX; testcase++) {
+ vm = vm_create_with_one_vcpu(&vcpu, guest_monitor_wait);
+ vcpu_args_set(vcpu, 1, (void *)(long)testcase);
+
+ disabled_quirks = 0;
+ if (testcase & MWAIT_QUIRK_DISABLED) {
+ disabled_quirks |= KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS;
+ strcpy(test, "MWAIT can fault");
+ } else {
+ strcpy(test, "MWAIT never faults");
+ }
+ if (testcase & MISC_ENABLES_QUIRK_DISABLED) {
+ disabled_quirks |= KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT;
+ strcat(test, ", MISC_ENABLE updates CPUID");
+ } else {
+ strcat(test, ", no CPUID updates");
+ }
+
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, disabled_quirks);
+
+ if (!(testcase & MISC_ENABLES_QUIRK_DISABLED) &&
+ (!!(testcase & CPUID_DISABLED) ^ !!(testcase & MWAIT_DISABLED)))
+ continue;
+
+ if (testcase & CPUID_DISABLED) {
+ strcat(test, ", CPUID clear");
+ vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_MWAIT);
+ } else {
+ strcat(test, ", CPUID set");
+ vcpu_set_cpuid_feature(vcpu, X86_FEATURE_MWAIT);
+ }
+
+ if (testcase & MWAIT_DISABLED)
+ strcat(test, ", MWAIT disabled");
- while (1) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
- case UCALL_SYNC:
- testcase = uc.args[1];
- break;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- goto done;
+ /* Detected in vcpu_run */
+ break;
case UCALL_DONE:
- goto done;
+ ksft_test_result_pass("%s\n", test);
+ break;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
- goto done;
- }
-
- disabled_quirks = 0;
- if (testcase & MWAIT_QUIRK_DISABLED)
- disabled_quirks |= KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS;
- if (testcase & MISC_ENABLES_QUIRK_DISABLED)
- disabled_quirks |= KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT;
- vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, disabled_quirks);
-
- /*
- * If the MISC_ENABLES quirk (KVM neglects to update CPUID to
- * enable/disable MWAIT) is disabled, toggle the ENABLE_MWAIT
- * bit in MISC_ENABLES accordingly. If the quirk is enabled,
- * the only valid configuration is MWAIT disabled, as CPUID
- * can't be manually changed after running the vCPU.
- */
- if (!(testcase & MISC_ENABLES_QUIRK_DISABLED)) {
- TEST_ASSERT(testcase & MWAIT_DISABLED,
- "Can't toggle CPUID features after running vCPU");
- continue;
+ break;
}
-
- vcpu_set_msr(vcpu, MSR_IA32_MISC_ENABLE,
- (testcase & MWAIT_DISABLED) ? 0 : MSR_IA32_MISC_ENABLE_MWAIT);
+ kvm_vm_free(vm);
}
+ ksft_finished();
-done:
- kvm_vm_free(vm);
return 0;
}
diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c
index e949a43a6145..efabfcbe0b49 100644
--- a/tools/testing/selftests/mincore/mincore_selftest.c
+++ b/tools/testing/selftests/mincore/mincore_selftest.c
@@ -261,9 +261,6 @@ TEST(check_file_mmap)
TH_LOG("No read-ahead pages found in memory");
}
- EXPECT_LT(i, vec_size) {
- TH_LOG("Read-ahead pages reached the end of the file");
- }
/*
* End of the readahead window. The rest of the pages shouldn't
* be in memory.
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 679542f565a4..532bb732bc6d 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -39,6 +39,7 @@ scm_rights
sk_bind_sendto_listen
sk_connect_zero_addr
sk_so_peek_off
+skf_net_off
socket
so_incoming_cpu
so_netns_cookie
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 6d718b478ed8..124078b56fa4 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -106,6 +106,8 @@ TEST_PROGS += ipv6_route_update_soft_lockup.sh
TEST_PROGS += busy_poll_test.sh
TEST_GEN_PROGS += proc_net_pktgen
TEST_PROGS += lwt_dst_cache_ref_loop.sh
+TEST_PROGS += skf_net_off.sh
+TEST_GEN_FILES += skf_net_off
# YNL files, must be before "include ..lib.mk"
YNL_GEN_FILES := busy_poller netlink-dumps
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 13a3b68181ee..befa66f5a366 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -1441,6 +1441,15 @@ chk_join_nr()
fi
fi
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckHMacFailure")
+ if [ -z "$count" ]; then
+ rc=${KSFT_SKIP}
+ elif [ "$count" != "0" ]; then
+ rc=${KSFT_FAIL}
+ print_check "synack HMAC"
+ fail_test "got $count JOIN[s] synack HMAC failure expected 0"
+ fi
+
count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinAckRx")
if [ -z "$count" ]; then
rc=${KSFT_SKIP}
@@ -1450,6 +1459,15 @@ chk_join_nr()
fail_test "got $count JOIN[s] ack rx expected $ack_nr"
fi
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinAckHMacFailure")
+ if [ -z "$count" ]; then
+ rc=${KSFT_SKIP}
+ elif [ "$count" != "0" ]; then
+ rc=${KSFT_FAIL}
+ print_check "ack HMAC"
+ fail_test "got $count JOIN[s] ack HMAC failure expected 0"
+ fi
+
print_results "join Rx" ${rc}
join_syn_tx="${join_syn_tx:-${syn_nr}}" \
diff --git a/tools/testing/selftests/net/netfilter/nft_concat_range.sh b/tools/testing/selftests/net/netfilter/nft_concat_range.sh
index 47088b005390..1f5979c1510c 100755
--- a/tools/testing/selftests/net/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/net/netfilter/nft_concat_range.sh
@@ -27,7 +27,7 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
net6_port_net6_port net_port_mac_proto_net"
# Reported bugs, also described by TYPE_ variables below
-BUGS="flush_remove_add reload net_port_proto_match"
+BUGS="flush_remove_add reload net_port_proto_match avx2_mismatch"
# List of possible paths to pktgen script from kernel tree for performance tests
PKTGEN_SCRIPT_PATHS="
@@ -387,6 +387,25 @@ race_repeat 0
perf_duration 0
"
+
+TYPE_avx2_mismatch="
+display avx2 false match
+type_spec inet_proto . ipv6_addr
+chain_spec meta l4proto . ip6 daddr
+dst proto addr6
+src
+start 1
+count 1
+src_delta 1
+tools ping
+proto icmp6
+
+race_repeat 0
+
+perf_duration 0
+"
+
+
# Set template for all tests, types and rules are filled in depending on test
set_template='
flush ruleset
@@ -1629,6 +1648,24 @@ test_bug_net_port_proto_match() {
nft flush ruleset
}
+test_bug_avx2_mismatch()
+{
+ setup veth send_"${proto}" set || return ${ksft_skip}
+
+ local a1="fe80:dead:01ff:0a02:0b03:6007:8009:a001"
+ local a2="fe80:dead:01fe:0a02:0b03:6007:8009:a001"
+
+ nft "add element inet filter test { icmpv6 . $a1 }"
+
+ dst_addr6="$a2"
+ send_icmp6
+
+ if [ "$(count_packets)" -gt "0" ]; then
+ err "False match for $a2"
+ return 1
+ fi
+}
+
test_reported_issues() {
eval test_bug_"${subtest}"
}
diff --git a/tools/testing/selftests/net/skf_net_off.c b/tools/testing/selftests/net/skf_net_off.c
new file mode 100644
index 000000000000..1fdf61d6cd7f
--- /dev/null
+++ b/tools/testing/selftests/net/skf_net_off.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Open a tun device.
+ *
+ * [modifications: use IFF_NAPI_FRAGS, add sk filter]
+ *
+ * Expects the device to have been configured previously, e.g.:
+ * sudo ip tuntap add name tap1 mode tap
+ * sudo ip link set tap1 up
+ * sudo ip link set dev tap1 addr 02:00:00:00:00:01
+ * sudo ip -6 addr add fdab::1 peer fdab::2 dev tap1 nodad
+ *
+ * And to avoid premature pskb_may_pull:
+ *
+ * sudo ethtool -K tap1 gro off
+ * sudo bash -c 'echo 0 > /proc/sys/net/ipv4/ip_early_demux'
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <linux/filter.h>
+#include <linux/if.h>
+#include <linux/if_packet.h>
+#include <linux/if_tun.h>
+#include <linux/ipv6.h>
+#include <netinet/if_ether.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/udp.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <unistd.h>
+
+static bool cfg_do_filter;
+static bool cfg_do_frags;
+static int cfg_dst_port = 8000;
+static char *cfg_ifname;
+
+static int tun_open(const char *tun_name)
+{
+ struct ifreq ifr = {0};
+ int fd, ret;
+
+ fd = open("/dev/net/tun", O_RDWR);
+ if (fd == -1)
+ error(1, errno, "open /dev/net/tun");
+
+ ifr.ifr_flags = IFF_TAP;
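+ /* With IFF_NAPI_FRAGS, written packets are delivered as fragmented
+ * skbs, so the attached socket filter may have to read headers that
+ * sit outside the linear area.
+ */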
+ if (cfg_do_frags)
+ ifr.ifr_flags |= IFF_NAPI | IFF_NAPI_FRAGS;
+
+ strncpy(ifr.ifr_name, tun_name, IFNAMSIZ - 1);
+
+ ret = ioctl(fd, TUNSETIFF, &ifr);
+ if (ret)
+ error(1, errno, "ioctl TUNSETIFF");
+
+ return fd;
+}
+
+static void sk_set_filter(int fd)
+{
+ const int offset_proto = offsetof(struct ip6_hdr, ip6_nxt);
+ const int offset_dport = sizeof(struct ip6_hdr) + offsetof(struct udphdr, dest);
+
+ /* Filter UDP packets with destination port cfg_dst_port */
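+ /* Offsets are relative to SKF_NET_OFF, i.e. the start of the network
+ * header, so the same loads work wherever the headers sit in the skb.
+ */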
+ struct sock_filter filter_code[] = {
+ BPF_STMT(BPF_LD + BPF_B + BPF_ABS, SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, PACKET_HOST, 0, 4),
+ BPF_STMT(BPF_LD + BPF_B + BPF_ABS, SKF_NET_OFF + offset_proto),
+ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 0, 2),
+ BPF_STMT(BPF_LD + BPF_H + BPF_ABS, SKF_NET_OFF + offset_dport),
+ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, cfg_dst_port, 1, 0),
+ BPF_STMT(BPF_RET + BPF_K, 0),
+ BPF_STMT(BPF_RET + BPF_K, 0xFFFF),
+ };
+
+ struct sock_fprog filter = {
+ sizeof(filter_code) / sizeof(filter_code[0]),
+ filter_code,
+ };
+
+ if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &filter, sizeof(filter)))
+ error(1, errno, "setsockopt attach filter");
+}
+
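+/* Raw IPv6/UDP socket used as the receiver; the classic BPF filter from
+ * sk_set_filter() is attached to it when -f is given.
+ */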
+static int raw_open(void)
+{
+ int fd;
+
+ fd = socket(PF_INET6, SOCK_RAW, IPPROTO_UDP);
+ if (fd == -1)
+ error(1, errno, "socket raw (udp)");
+
+ if (cfg_do_filter)
+ sk_set_filter(fd);
+
+ return fd;
+}
+
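+/* Hand-build a tun_pi + ethernet + IPv6 + UDP packet and write it to the
+ * tap fd, with each header in its own iovec segment.
+ */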
+static void tun_write(int fd)
+{
+ const char eth_src[] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
+ const char eth_dst[] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
+ struct tun_pi pi = {0};
+ struct ipv6hdr ip6h = {0};
+ struct udphdr uh = {0};
+ struct ethhdr eth = {0};
+ uint32_t payload;
+ struct iovec iov[5];
+ int ret;
+
+ pi.proto = htons(ETH_P_IPV6);
+
+ memcpy(eth.h_source, eth_src, sizeof(eth_src));
+ memcpy(eth.h_dest, eth_dst, sizeof(eth_dst));
+ eth.h_proto = htons(ETH_P_IPV6);
+
+ ip6h.version = 6;
+ ip6h.payload_len = htons(sizeof(uh) + sizeof(uint32_t));
+ ip6h.nexthdr = IPPROTO_UDP;
+ ip6h.hop_limit = 8;
+ if (inet_pton(AF_INET6, "fdab::2", &ip6h.saddr) != 1)
+ error(1, errno, "inet_pton src");
+ if (inet_pton(AF_INET6, "fdab::1", &ip6h.daddr) != 1)
+ error(1, errno, "inet_pton src");
+
+ uh.source = htons(8000);
+ uh.dest = htons(cfg_dst_port);
+ uh.len = ip6h.payload_len;
+ uh.check = 0;
+
+ payload = htonl(0xABABABAB); /* Covered in IPv6 length */
+
+ iov[0].iov_base = &pi;
+ iov[0].iov_len = sizeof(pi);
+ iov[1].iov_base = &eth;
+ iov[1].iov_len = sizeof(eth);
+ iov[2].iov_base = &ip6h;
+ iov[2].iov_len = sizeof(ip6h);
+ iov[3].iov_base = &uh;
+ iov[3].iov_len = sizeof(uh);
+ iov[4].iov_base = &payload;
+ iov[4].iov_len = sizeof(payload);
+
+ ret = writev(fd, iov, sizeof(iov) / sizeof(iov[0]));
+ if (ret <= 0)
+ error(1, errno, "writev");
+}
+
+static void raw_read(int fd)
+{
+ struct timeval tv = { .tv_usec = 100 * 1000 };
+ struct msghdr msg = {0};
+ struct iovec iov[2];
+ struct udphdr uh;
+ uint32_t payload[2];
+ int ret;
+
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
+ error(1, errno, "setsockopt rcvtimeo udp");
+
+ iov[0].iov_base = &uh;
+ iov[0].iov_len = sizeof(uh);
+
+ iov[1].iov_base = payload;
+ iov[1].iov_len = sizeof(payload);
+
+ msg.msg_iov = iov;
+ msg.msg_iovlen = sizeof(iov) / sizeof(iov[0]);
+
+ ret = recvmsg(fd, &msg, 0);
+ if (ret <= 0)
+ error(1, errno, "read raw");
+ if (ret != sizeof(uh) + sizeof(payload[0]))
+ error(1, errno, "read raw: len=%d\n", ret);
+
+ fprintf(stderr, "raw recv: 0x%x\n", payload[0]);
+}
+
+static void parse_opts(int argc, char **argv)
+{
+ int c;
+
+ while ((c = getopt(argc, argv, "fFi:")) != -1) {
+ switch (c) {
+ case 'f':
+ cfg_do_filter = true;
+ printf("bpf filter enabled\n");
+ break;
+ case 'F':
+ cfg_do_frags = true;
+ printf("napi frags mode enabled\n");
+ break;
+ case 'i':
+ cfg_ifname = optarg;
+ break;
+ default:
+ error(1, 0, "unknown option %c", optopt);
+ break;
+ }
+ }
+
+ if (!cfg_ifname)
+ error(1, 0, "must specify tap interface name (-i)");
+}
+
+int main(int argc, char **argv)
+{
+ int fdt, fdr;
+
+ parse_opts(argc, argv);
+
+ fdr = raw_open();
+ fdt = tun_open(cfg_ifname);
+
+ tun_write(fdt);
+ raw_read(fdr);
+
+ if (close(fdt))
+ error(1, errno, "close tun");
+ if (close(fdr))
+ error(1, errno, "close udp");
+
+ fprintf(stderr, "OK\n");
+ return 0;
+}
diff --git a/tools/testing/selftests/net/skf_net_off.sh b/tools/testing/selftests/net/skf_net_off.sh
new file mode 100755
index 000000000000..5da5066fb465
--- /dev/null
+++ b/tools/testing/selftests/net/skf_net_off.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+readonly NS="ns-$(mktemp -u XXXXXX)"
+
+cleanup() {
+ ip netns del $NS
+}
+
+ip netns add $NS
+trap cleanup EXIT
+
+ip -netns $NS link set lo up
+ip -netns $NS tuntap add name tap1 mode tap
+ip -netns $NS link set tap1 up
+ip -netns $NS link set dev tap1 addr 02:00:00:00:00:01
+ip -netns $NS -6 addr add fdab::1 peer fdab::2 dev tap1 nodad
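+# disable gro, else coalescing may pull headers into the linear area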
+ip netns exec $NS ethtool -K tap1 gro off
+
+# disable early demux, else udp_v6_early_demux pulls udp header into linear
+ip netns exec $NS sysctl -w net.ipv4.ip_early_demux=0
+
+echo "no filter"
+ip netns exec $NS ./skf_net_off -i tap1
+
+echo "filter, linear skb (-f)"
+ip netns exec $NS ./skf_net_off -i tap1 -f
+
+echo "filter, fragmented skb (-f) (-F)"
+ip netns exec $NS ./skf_net_off -i tap1 -f -F
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 9a85f93c33d8..5ded3b3a7538 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -1753,6 +1753,42 @@ TEST_F(tls_basic, rekey_tx)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
+TEST_F(tls_basic, disconnect)
+{
+ char const *test_str = "test_message";
+ int send_len = strlen(test_str) + 1;
+ struct tls_crypto_info_keys key;
+ struct sockaddr_in addr;
+ char buf[20];
+ int ret;
+
+ if (self->notls)
+ return;
+
+ tls_crypto_info_init(TLS_1_3_VERSION, TLS_CIPHER_AES_GCM_128,
+ &key, 0);
+
+ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &key, key.len);
+ ASSERT_EQ(ret, 0);
+
+ /* Pre-queue the data so that setsockopt parses it but doesn't
+ * dequeue it from the TCP socket. recvmsg would dequeue.
+ */
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+
+ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &key, key.len);
+ ASSERT_EQ(ret, 0);
+
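+ /* connect() with AF_UNSPEC asks TCP to disconnect the socket;
+ * a TLS socket must refuse this with EOPNOTSUPP.
+ */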
+ addr.sin_family = AF_UNSPEC;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+ ret = connect(self->cfd, &addr, sizeof(addr));
+ EXPECT_EQ(ret, -1);
+ EXPECT_EQ(errno, EOPNOTSUPP);
+
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
+}
+
TEST_F(tls, rekey)
{
char const *test_str_1 = "test_message_before_rekey";
diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
index 25454fd95537..d4ea9cd845a3 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
@@ -158,5 +158,160 @@
"$TC qdisc del dev $DUMMY handle 1: root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
+ },
+ {
+ "id": "a4bb",
+ "name": "Test FQ_CODEL with HTB parent - force packet drop with empty queue",
+ "category": [
+ "qdisc",
+ "fq_codel",
+ "htb"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1: root htb default 10",
+ "$TC class add dev $DUMMY parent 1: classid 1:10 htb rate 1kbit",
+ "$TC qdisc add dev $DUMMY parent 1:10 handle 10: fq_codel memory_limit 1 flows 1 target 0.1ms interval 1ms",
+ "$TC filter add dev $DUMMY parent 1: protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:10",
+ "ping -c 5 -f -I $DUMMY 10.10.10.1 > /dev/null || true",
+ "sleep 0.1"
+ ],
+ "cmdUnderTest": "$TC -s qdisc show dev $DUMMY",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY | grep -A 5 'qdisc fq_codel'",
+ "matchPattern": "dropped [1-9][0-9]*",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
+ },
+ {
+ "id": "a4be",
+ "name": "Test FQ_CODEL with QFQ parent - force packet drop with empty queue",
+ "category": [
+ "qdisc",
+ "fq_codel",
+ "qfq"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1: root qfq",
+ "$TC class add dev $DUMMY parent 1: classid 1:10 qfq weight 1 maxpkt 1000",
+ "$TC qdisc add dev $DUMMY parent 1:10 handle 10: fq_codel memory_limit 1 flows 1 target 0.1ms interval 1ms",
+ "$TC filter add dev $DUMMY parent 1: protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:10",
+ "ping -c 10 -s 1000 -f -I $DUMMY 10.10.10.1 > /dev/null || true",
+ "sleep 0.1"
+ ],
+ "cmdUnderTest": "$TC -s qdisc show dev $DUMMY",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY | grep -A 5 'qdisc fq_codel'",
+ "matchPattern": "dropped [1-9][0-9]*",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
+ },
+ {
+ "id": "a4bf",
+ "name": "Test FQ_CODEL with HFSC parent - force packet drop with empty queue",
+ "category": [
+ "qdisc",
+ "fq_codel",
+ "hfsc"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1: root hfsc default 10",
+ "$TC class add dev $DUMMY parent 1: classid 1:10 hfsc sc rate 1kbit ul rate 1kbit",
+ "$TC qdisc add dev $DUMMY parent 1:10 handle 10: fq_codel memory_limit 1 flows 1 target 0.1ms interval 1ms",
+ "$TC filter add dev $DUMMY parent 1: protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:10",
+ "ping -c 5 -f -I $DUMMY 10.10.10.1 > /dev/null || true",
+ "sleep 0.1"
+ ],
+ "cmdUnderTest": "$TC -s qdisc show dev $DUMMY",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY | grep -A 5 'qdisc fq_codel'",
+ "matchPattern": "dropped [1-9][0-9]*",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
+ },
+ {
+ "id": "a4c0",
+ "name": "Test FQ_CODEL with DRR parent - force packet drop with empty queue",
+ "category": [
+ "qdisc",
+ "fq_codel",
+ "drr"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1: root drr",
+ "$TC class add dev $DUMMY parent 1: classid 1:10 drr quantum 1500",
+ "$TC qdisc add dev $DUMMY parent 1:10 handle 10: fq_codel memory_limit 1 flows 1 target 0.1ms interval 1ms",
+ "$TC filter add dev $DUMMY parent 1: protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:10",
+ "ping -c 5 -f -I $DUMMY 10.10.10.1 > /dev/null || true",
+ "sleep 0.1"
+ ],
+ "cmdUnderTest": "$TC -s qdisc show dev $DUMMY",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY | grep -A 5 'qdisc fq_codel'",
+ "matchPattern": "dropped [1-9][0-9]*",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
+ },
+ {
+ "id": "a4c1",
+ "name": "Test FQ_CODEL with ETS parent - force packet drop with empty queue",
+ "category": [
+ "qdisc",
+ "fq_codel",
+ "ets"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1: root ets bands 2 strict 1",
+ "$TC class change dev $DUMMY parent 1: classid 1:1 ets",
+ "$TC qdisc add dev $DUMMY parent 1:1 handle 10: fq_codel memory_limit 1 flows 1 target 0.1ms interval 1ms",
+ "$TC filter add dev $DUMMY parent 1: protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1",
+ "ping -c 5 -f -I $DUMMY 10.10.10.1 > /dev/null || true",
+ "sleep 0.1"
+ ],
+ "cmdUnderTest": "$TC -s qdisc show dev $DUMMY",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY | grep -A 5 'qdisc fq_codel'",
+ "matchPattern": "dropped [1-9][0-9]*",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json
index 50e8d72781cb..28c6ce6da7db 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json
@@ -228,5 +228,41 @@
"matchCount": "0",
"teardown": [
]
+ },
+ {
+ "id": "7f8f",
+ "name": "Check that a derived limit of 1 is rejected (limit 2 depth 1 flows 1)",
+ "category": [
+ "qdisc",
+ "sfq"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq limit 2 depth 1 flows 1",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "sfq",
+ "matchCount": "0",
+ "teardown": []
+ },
+ {
+ "id": "5168",
+ "name": "Check that a derived limit of 1 is rejected (limit 2 depth 1 divisor 1)",
+ "category": [
+ "qdisc",
+ "sfq"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq limit 2 depth 1 divisor 1",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "sfq",
+ "matchCount": "0",
+ "teardown": []
}
]
diff --git a/tools/testing/selftests/tpm2/.gitignore b/tools/testing/selftests/tpm2/.gitignore
new file mode 100644
index 000000000000..6d6165c5e35d
--- /dev/null
+++ b/tools/testing/selftests/tpm2/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+AsyncTest.log
+SpaceTest.log
diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh
index 168f4b166234..3a60e6c6f5c9 100755
--- a/tools/testing/selftests/tpm2/test_smoke.sh
+++ b/tools/testing/selftests/tpm2/test_smoke.sh
@@ -6,6 +6,6 @@ ksft_skip=4
[ -e /dev/tpm0 ] || exit $ksft_skip
read tpm_version < /sys/class/tpm/tpm0/tpm_version_major
-[ "$tpm_version" == 2 ] || exit $ksft_skip
+[ "$tpm_version" = 2 ] || exit $ksft_skip
python3 -m unittest -v tpm2_tests.SmokeTest 2>&1
diff --git a/tools/testing/selftests/ublk/test_stripe_04.sh b/tools/testing/selftests/ublk/test_stripe_04.sh
new file mode 100755
index 000000000000..1f2b642381d1
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_stripe_04.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="stripe_04"
+ERR_CODE=0
+
+_prep_test "stripe" "mkfs & mount & umount on zero copy"
+
+backfile_0=$(_create_backfile 256M)
+backfile_1=$(_create_backfile 256M)
+dev_id=$(_add_ublk_dev -t stripe -z -q 2 "$backfile_0" "$backfile_1")
+_check_add_dev $TID $? "$backfile_0" "$backfile_1"
+
+_mkfs_mount_test /dev/ublkb"${dev_id}"
+ERR_CODE=$?
+
+_cleanup_test "stripe"
+
+_remove_backfile "$backfile_0"
+_remove_backfile "$backfile_1"
+
+_show_result $TID $ERR_CODE
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 746e1f466aa6..727b542074e7 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -75,7 +75,7 @@ config KVM_COMPAT
depends on KVM && COMPAT && !(S390 || ARM64 || RISCV)
config HAVE_KVM_IRQ_BYPASS
- bool
+ tristate
select IRQ_BYPASS_MANAGER
config HAVE_KVM_VCPU_ASYNC_IOCTL
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 249ba5b72e9b..11e5d1e3f12e 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -149,7 +149,7 @@ irqfd_shutdown(struct work_struct *work)
/*
* It is now safe to release the object's resources
*/
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
eventfd_ctx_put(irqfd->eventfd);
@@ -274,7 +274,7 @@ static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
write_seqcount_end(&irqfd->irq_entry_sc);
}
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
struct irq_bypass_consumer *cons)
{
@@ -424,7 +424,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
if (events & EPOLLIN)
schedule_work(&irqfd->inject);
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
if (kvm_arch_has_irq_bypass()) {
irqfd->consumer.token = (void *)irqfd->eventfd;
irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
@@ -609,14 +609,14 @@ void kvm_irq_routing_update(struct kvm *kvm)
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
/* Under irqfds.lock, so can read irq_entry safely */
struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;
#endif
irqfd_update(kvm, irqfd);
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
if (irqfd->producer &&
kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
int ret = kvm_arch_update_irqfd_routing(